From c9397770c008d427da0b7ad058782fc8564c10d3 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 8 Aug 2001 13:32:23 +0000 Subject: Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro * x86/x86-codegen.h, x86/test.c: added x86 code emitter with test. svn path=/trunk/mono/; revision=435 --- ChangeLog | 5 + x86/test.c | 211 ++++++++++ x86/x86-codegen.h | 1217 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1433 insertions(+) create mode 100644 ChangeLog create mode 100644 x86/test.c create mode 100644 x86/x86-codegen.h diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..57f43d2 --- /dev/null +++ b/ChangeLog @@ -0,0 +1,5 @@ +Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h, x86/test.c: added x86 code emitter with + test. + diff --git a/x86/test.c b/x86/test.c new file mode 100644 index 0000000..0ae82fe --- /dev/null +++ b/x86/test.c @@ -0,0 +1,211 @@ +#include "x86-codegen.h" +#include + +/* don't run the resulting program, it will destroy your computer, + * just objdump -d it to inspect we generated the correct assembler. + */ + +int main() { + unsigned char code [16000]; + unsigned char *p = code; + unsigned char *target; + unsigned long mem_addr = 0xdeadbeef; + int size, i; + + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + + x86_prolog (p, 16, X86_CALLER_REGS); + + x86_cmpxchg_reg_reg (p, X86_EAX, X86_EBP); + x86_cmpxchg_membase_reg (p, X86_EAX, 12, X86_EBP); + + x86_xchg_reg_reg (p, X86_EAX, X86_EBP, 4); + x86_xchg_reg_reg (p, X86_EAX, X86_EBP, 1); // FIXME? + x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBP, 4); + x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBP, 2); + x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBX, 1); // FIXME? + + x86_inc_reg (p, X86_EAX); + x86_inc_mem (p, mem_addr); + x86_inc_membase (p, X86_ESP, 4); + + x86_nop (p); + x86_nop (p); + + x86_dec_reg (p, X86_EAX); + x86_dec_reg (p, X86_ECX); + x86_dec_mem (p, mem_addr); + x86_dec_membase (p, X86_ESP, 4); + + x86_not_reg (p, X86_EDX); + x86_not_reg (p, X86_ECX); + x86_not_mem (p, mem_addr); + x86_not_membase (p, X86_ESP, 4); + x86_not_membase (p, X86_ESP, 0x4444444); + x86_not_membase (p, X86_EBP, 0x4444444); + x86_not_membase (p, X86_ECX, 0x4444444); + x86_not_membase (p, X86_EDX, 0); + x86_not_membase (p, X86_EBP, 0); + + x86_neg_reg (p, X86_EAX); + x86_neg_reg (p, X86_ECX); + x86_neg_mem (p, mem_addr); + x86_neg_membase (p, X86_ESP, 8); + + x86_alu_reg_imm (p, X86_ADD, X86_EAX, 5); + x86_alu_reg_imm (p, X86_ADD, X86_EBX, -10); + x86_alu_reg_imm (p, X86_SUB, X86_EDX, 7); + x86_alu_reg_imm (p, X86_OR, X86_ESP, 0xffffedaf); + x86_alu_reg_imm (p, X86_CMP, X86_ECX, 1); + x86_alu_mem_imm (p, X86_ADC, mem_addr, 2); + x86_alu_membase_imm (p, X86_ADC, X86_ESP, -4, 4); + x86_alu_membase_imm (p, X86_ADC, X86_ESP, -12, 0xffffedaf); + + x86_alu_mem_reg (p, X86_SUB, mem_addr, X86_EDX); + x86_alu_reg_reg (p, X86_ADD, X86_EAX, X86_EBX); + x86_alu_reg_mem (p, X86_ADD, X86_EAX, mem_addr); + x86_alu_reg_imm (p, X86_ADD, X86_EAX, 0xdeadbeef); + x86_alu_reg_membase (p, X86_XOR, X86_EDX, X86_ESP, 4); + x86_alu_membase_reg (p, X86_XOR, X86_EBP, 8, X86_ESI); + + x86_test_reg_imm (p, X86_EAX, 16); + x86_test_reg_imm (p, X86_EDX, -16); + x86_test_mem_imm (p, mem_addr, 1); + x86_test_membase_imm (p, X86_EBP, 8, 1); + + x86_test_reg_reg (p, X86_EAX, X86_EDX); + x86_test_mem_reg (p, mem_addr, X86_EDX); + x86_test_membase_reg (p, X86_ESI, 4, X86_EDX); + + x86_shift_reg_imm (p, X86_SHL, X86_EAX, 1); + x86_shift_reg_imm (p, X86_SHL, X86_EDX, 2); + + x86_shift_mem_imm (p, 
X86_SHL, mem_addr, 2); + x86_shift_membase_imm (p, X86_SHLR, X86_EBP, 8, 4); + + /* + * Shift by CL + */ + x86_shift_reg (p, X86_SHL, X86_EAX); + x86_shift_mem (p, X86_SHL, mem_addr); + + x86_mul_reg (p, X86_EAX, 0); + x86_mul_reg (p, X86_EAX, 1); + x86_mul_membase (p, X86_EBP, 8, 1); + + x86_imul_reg_reg (p, X86_EBX, X86_EDX); + x86_imul_reg_membase (p, X86_EBX, X86_EBP, 12); + + x86_imul_reg_reg_imm (p, X86_EBX, X86_EDX, 10); + x86_imul_reg_mem_imm (p, X86_EBX, mem_addr, 20); + x86_imul_reg_membase_imm (p, X86_EBX, X86_EBP, 16, 300); + + x86_div_reg (p, X86_EDX, 0); + x86_div_reg (p, X86_EDX, 1); + x86_div_mem (p, mem_addr, 1); + x86_div_membase (p, X86_ESI, 4, 1); + + x86_mov_mem_reg (p, mem_addr, X86_EAX, 4); + x86_mov_mem_reg (p, mem_addr, X86_EAX, 2); + x86_mov_mem_reg (p, mem_addr, X86_EAX, 1); + x86_mov_membase_reg (p, X86_EBP, 4, X86_EAX, 1); + + x86_mov_reg_reg (p, X86_EAX, X86_EAX, 1); + x86_mov_reg_reg (p, X86_EAX, X86_EAX, 4); + x86_mov_reg_mem (p, X86_EAX, mem_addr, 4); + + x86_mov_reg_imm (p, X86_EAX, 10); + x86_mov_mem_imm (p, mem_addr, 54, 4); + x86_mov_mem_imm (p, mem_addr, 54, 1); + + x86_lea_mem (p, X86_EDX, mem_addr); + /* test widen */ + + x86_cdq (p); + x86_wait (p); + + x86_fp_op_mem (p, X86_FADD, mem_addr, 1); + x86_fp_op_mem (p, X86_FSUB, mem_addr, 0); + x86_fp_op (p, X86_FSUB, 2); + x86_fp_op_reg (p, X86_FMUL, 1, 0); + x86_fstp (p, 2); + x86_fcompp (p); + x86_fnstsw (p); + x86_fnstcw (p, mem_addr); + x86_fnstcw_membase (p, X86_ESP, -8); + + x86_fldcw_membase (p, X86_ESP, -8); + x86_fchs (p); + x86_frem (p); + x86_fxch (p, 3); + x86_fcomip (p, 3); + x86_fld_membase (p, X86_ESP, -8, 1); + x86_fld_membase (p, X86_ESP, -8, 0); + x86_fld80_membase (p, X86_ESP, -8); + x86_fild_membase (p, X86_ESP, -8, 1); + x86_fild_membase (p, X86_ESP, -8, 0); + x86_fld_reg (p, 4); + x86_fldz (p); + x86_fld1 (p); + + x86_fst (p, mem_addr, 1, 0); + x86_fst (p, mem_addr, 1, 1); + x86_fst (p, mem_addr, 0, 1); + + x86_fist_pop_membase (p, X86_EDX, 4, 1); + x86_fist_pop_membase (p, X86_EDX, 4, 0); + + x86_push_reg (p, X86_EBX); + x86_push_membase (p, X86_EBP, 8); + x86_push_imm (p, -1); + x86_pop_reg (p, X86_EBX); + + x86_pushad (p); + x86_pushfd (p); + x86_popfd (p); + x86_popad (p); + + target = p; + + x86_jump32 (p, mem_addr); + x86_jump8 (p, 12); + x86_jump_reg (p, X86_EAX); + x86_jump_membase (p, X86_EDX, 16); + + x86_jump_code (p, target); + + x86_branch8 (p, X86_CC_EQ, 54, 1); + x86_branch32 (p, X86_CC_LT, 54, 0); + x86_branch (p, X86_CC_GT, target, 0); + x86_branch_disp (p, X86_CC_NE, -4, 0); + + x86_call_imm (p, printf); + x86_call_reg (p, X86_ECX); + + x86_sahf (p); + + x86_fsin (p); + x86_fcos (p); + x86_fabs (p); + x86_fpatan (p); + x86_fprem (p); + x86_fprem1 (p); + x86_frndint (p); + x86_fsqrt (p); + x86_fptan (p); + + x86_leave (p); + x86_ret (p); + x86_ret_imm (p, 24); + + x86_cmov_reg (p, X86_CC_GT, 1, X86_EAX, X86_EDX); + x86_cmov_membase (p, X86_CC_GT, 0, X86_EAX, X86_EDX, -4); + + x86_nop (p); + x86_epilog (p, X86_CALLEE_REGS); + + size = p-code; + for (i = 0; i < size; ++i) + printf (".byte %d\n", (unsigned int) code [i]); + return 0; +} diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h new file mode 100644 index 0000000..a699e5f --- /dev/null +++ b/x86/x86-codegen.h @@ -0,0 +1,1217 @@ +/* Copyright (C) 2000 Intel Corporation. All rights reserved. 
+// +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.1 2001/08/08 13:32:23 lupus Exp $ +*/ + +#ifndef X86_H +#define X86_H +#include +/* +// x86 register numbers +*/ +typedef enum { + X86_EAX = 0, + X86_ECX = 1, + X86_EDX = 2, + X86_EBX = 3, + X86_ESP = 4, + X86_EBP = 5, + X86_ESI = 6, + X86_EDI = 7, + X86_NREG +} X86_Reg_No; +/* +// opcodes for alu instructions +*/ +typedef enum { + X86_ADD = 0, + X86_OR = 1, + X86_ADC = 2, + X86_SBB = 3, + X86_AND = 4, + X86_SUB = 5, + X86_XOR = 6, + X86_CMP = 7, + X86_NALU +} X86_ALU_Opcode; +/* +// opcodes for shift instructions +*/ +typedef enum { + X86_SHLD, + X86_SHLR, + X86_SHL = 4, + X86_SHR = 5, + X86_SAR = 7, + X86_NSHIFT = 8 +} X86_Shift_Opcode; +/* +// opcodes for floating-point instructions +*/ +typedef enum { + X86_FADD = 0, + X86_FMUL = 1, + X86_FCOM = 2, + X86_FCOMP = 3, + X86_FSUB = 4, + X86_FSUBR = 5, + X86_FDIV = 6, + X86_FDIVR = 7, + X86_NFP = 8 +} X86_FP_Opcode; +/* +// integer conditions codes +*/ +typedef enum { + X86_CC_EQ = 0, + X86_CC_NE, + X86_CC_LT, + X86_CC_LE, + X86_CC_GT, + X86_CC_GE, + X86_CC_LZ, + X86_CC_GEZ, + X86_CC_P, + X86_CC_NP, + X86_NCC +} X86_CC; +/* +// prefix code +*/ +typedef enum { + X86_LOCK_PREFIX = 0xF0, + X86_REPNZ_PREFIX = 0xF2, + X86_REPZ_PREFIX = 0xF3, + X86_REP_PREFIX = 0xF3, + X86_CS_PREFIX = 0x2E, + X86_SS_PREFIX = 0x36, + X86_DS_PREFIX = 0x3E, + X86_ES_PREFIX = 0x26, + X86_FS_PREFIX = 0x64, + X86_GS_PREFIX = 0x65, + X86_OPERAND_PREFIX = 0x66, + X86_ADDRESS_PREFIX = 0x67 +} X86_Prefix; + +static const unsigned char +x86_cc_unsigned_map [X86_NCC] = { + 0x74, // eq + 0x75, // ne + 0x72, // lt + 0x76, // le + 0x77, // gt + 0x73, // ge + 0x78, // lz + 0x79, // gez + 0x7a, // p + 0x7b, // np +}; + +static const unsigned char +x86_cc_signed_map [X86_NCC] = { + 0x74, // eq + 0x75, // ne + 0x7c, // lt + 0x7e, // le + 0x7f, // gt + 0x7d, // ge + 0x78, // lz + 0x79, // gez + 0x7a, // p + 0x7b, // np +}; + +/* +// bitvector mask for callee-saved registers +*/ +#define X86_CALLEE_ESI_MASK (1<= -128 && (int)(imm) <= 127)) +#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1))) + +#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0) +#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) + +#define x86_membase_emit(inst,r,basereg,disp) do {\ + if ((basereg) == X86_ESP) { \ + if ((disp) == 0) { \ + x86_address_byte ((inst), 0, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + } else if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + break; \ + } \ + if ((disp) == 0 && (basereg) != X86_EBP) { \ + x86_address_byte ((inst), 0, (r), (basereg)); \ + break; \ + } \ + if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), (basereg)); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), (basereg)); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + } while (0) + +/* + * TODO: memindex_emit + */ + +#define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) + +#define x86_cmpxchg_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_reg_emit ((inst), 
(reg), (dreg)); \ + } while (0) + +#define x86_cmpxchg_mem_reg(inst,mem,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_xchg_reg_reg(inst,dreg,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_xchg_mem_reg(inst,mem,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_inc_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_inc_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0) + +#define x86_dec_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 1, (mem)); \ + } while (0) + +#define x86_dec_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 1, (basereg), (disp)); \ + } while (0) + +#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0) + +#define x86_not_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 2, (mem)); \ + } while (0) + +#define x86_not_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } while (0) + +#define x86_not_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 2, (reg)); \ + } while (0) + +#define x86_neg_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 3, (mem)); \ + } while (0) + +#define x86_neg_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 3, (basereg), (disp)); \ + } while (0) + +#define x86_neg_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 3, (reg)); \ + } while (0) + +#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0) + +#define x86_alu_reg_imm(inst,opc,reg,imm) \ + do { \ + if ((reg) == X86_EAX) { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ + x86_imm_emit32 ((inst), (imm)); \ + break; \ + } \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_mem_imm(inst,opc,mem,imm) \ + do { \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_mem_emit ((inst), (opc), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_mem_emit 
((inst), (opc), (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \ + do { \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_mem_reg(inst,opc,mem,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_alu_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_alu_reg_mem(inst,opc,reg,mem) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_test_reg_imm(inst,reg,imm) \ + do { \ + if ((reg) == X86_EAX) { \ + *(inst)++ = (unsigned char)0xa9; \ + } else { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 0, (reg)); \ + } \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_mem_imm(inst,mem,imm) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_membase_imm(inst,basereg,disp,imm) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_test_mem_reg(inst,mem,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_test_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_shift_reg_imm(inst,opc,reg,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_reg_emit ((inst), (opc), (reg)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_mem_imm(inst,opc,mem,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_mem_emit ((inst), (opc), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_reg(inst,opc,reg) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_reg_emit ((inst), (opc), (reg)); \ + } while (0) + 
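+/*
+ * A worked example of the shift-by-CL encoding: opcode 0xd3 selects the
+ * shift group that takes its count from CL, and the ModRM reg field picks
+ * the operation. x86_shift_reg (p, X86_SHL, X86_EAX) therefore emits
+ * 0xd3 0xe0, i.e. mod=3 (register direct), reg=4 (X86_SHL), rm=0 (X86_EAX).
+ */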
+#define x86_shift_mem(inst,opc,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } while (0) + +#define x86_shift_membase(inst,opc,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + } while (0) + +/* + * Multi op shift missing. + */ + +/* + * EDX:EAX = EAX * rm + */ +#define x86_mul_reg(inst,reg,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \ + } while (0) + +#define x86_mul_mem(inst,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_mul_membase(inst,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \ + } while (0) + +/* + * r *= rm + */ +#define x86_imul_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_imul_reg_mem(inst,reg,mem) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_imul_reg_membase(inst,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +/* + * dreg = rm * imm + */ +#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_mem_emit ((inst), (reg), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_reg_emit ((inst), (reg), (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +/* + * divide EDX:EAX by rm; + * eax = quotient, edx = remainder + */ + +#define x86_div_reg(inst,reg,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \ + } while (0) + +#define x86_div_mem(inst,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_div_membase(inst,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 6 + ((is_signed) ? 
1 : 0), (basereg), (disp)); \ + } while (0) + +#define x86_mov_mem_reg(inst,mem,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_mov_reg_reg(inst,dreg,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_mov_reg_mem(inst,reg,mem,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_mov_reg_membase(inst,reg,regbase,disp,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_mov_reg_imm(inst,reg,imm) \ + do { \ + if ((imm) == 0) { \ + x86_alu_reg_reg ((inst), X86_XOR, (reg), (reg)); \ + } else { \ + *(inst)++ = (unsigned char)0xb8 + (reg); \ + } \ + } while (0) + +#define x86_mov_mem_imm(inst,mem,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 4) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_lea_mem(inst,reg,mem) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_lea_membase(inst,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + 
*(inst)++ = op; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0) +#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0) + +#define x86_fp_op_mem(inst,opc,mem,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } while (0) + +#define x86_fp_op(inst,opc,index) \ + do { \ + *(inst)++ = (unsigned char)0xd8; \ + *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \ + } while (0) + +#define x86_fp_op_reg(inst,opc,index,pop_stack) \ + do { \ + static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \ + *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \ + *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \ + } while (0) + +#define x86_fstp(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdd; \ + *(inst)++ = (unsigned char)0xd8+(index); \ + } while (0) + +#define x86_fcompp(inst) \ + do { \ + *(inst)++ = (unsigned char)0xde; \ + *(inst)++ = (unsigned char)0xd9; \ + } while (0) + +#define x86_fnstsw(inst) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + +#define x86_fnstcw(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_mem_emit ((inst), 7, (mem)); \ + } while (0) + +#define x86_fnstcw_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } while (0) + +#define x86_fldcw(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_mem_emit ((inst), 5, (mem)); \ + } while (0) + +#define x86_fldcw_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } while (0) + +#define x86_fchs(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + +#define x86_frem(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xf8; \ + } while (0) + +#define x86_fxch(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \ + } while (0) + +#define x86_fcomip(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ + } while (0) + +#define x86_fld(inst,mem,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_fld_membase(inst,basereg,disp,is_double) \ + do { \ + *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_fld80(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 5, (mem)); \ + } while (0) + +#define x86_fld80_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } while (0) + +#define x86_fild(inst,mem,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_mem_emit ((inst), 5, (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 0, (mem)); \ + } \ + } while (0) + +#define x86_fild_membase(inst,basereg,disp,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } \ + } while (0) + +#define x86_fld_reg(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \ + } while (0) + +#define x86_fldz(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xee; \ + } while (0) + +#define x86_fld1(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xe8; \ + } while (0) + +#define x86_fst(inst,mem,is_double,pop_stack) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ + x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ + x86_membase_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (basereg), (disp)); \ + } while (0) + +#define x86_fist_pop(inst,mem,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_mem_emit ((inst), 7, (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 3, (mem)); \ + } \ + } while (0) + +#define x86_fist_pop_membase(inst,basereg,disp,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 3, (basereg), (disp)); \ + } \ + } while (0) + +#define x86_push_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0x50 + (reg); \ + } while (0) + +#define x86_push_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 6, (mem)); \ + } while (0) + +#define x86_push_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 6, (basereg), (disp)); \ + } while (0) + +#define x86_push_imm(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0x68; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_pop_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0x58 + (reg); \ + } while (0) + +#define x86_pop_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0x87; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_pop_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x87; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0) +#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0) +#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0) +#define x86_popfd(inst) do { 
*(inst)++ = (unsigned char)0x9d; } while (0) + +#define x86_jump32(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe9; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_jump8(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xeb; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_jump_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst), 4, (reg)); \ + } while (0) + +#define x86_jump_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 4, (mem)); \ + } while (0) + +#define x86_jump_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 4, (basereg), (disp)); \ + } while (0) + +/* + * target is a pointer in our buffer. + */ +#define x86_jump_code(inst,target) \ + do { \ + int t = (target) - (inst) - 2; \ + if (x86_is_imm8(t)) { \ + x86_jump8 ((inst), t); \ + } else { \ + t -= 3; \ + x86_jump32 ((inst), t); \ + } \ + } while (0) + +#define x86_jump_disp(inst,disp) \ + do { \ + int t = (disp) - 2; \ + if (x86_is_imm8(t)) { \ + x86_jump8 ((inst), t); \ + } else { \ + t -= 3; \ + x86_jump32 ((inst), t); \ + } \ + } while (0) + +#define x86_branch8(inst,cond,imm,is_signed) \ + do { \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)]; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)]; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_branch32(inst,cond,imm,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_branch(inst,cond,target,is_signed) \ + do { \ + int offset = (target) - (inst) - 2; \ + if (x86_is_imm8 ((offset))) \ + x86_branch8 ((inst), (cond), offset, (is_signed)); \ + else { \ + offset -= 4; \ + x86_branch32 ((inst), (cond), offset, (is_signed)); \ + } \ + } while (0) + +#define x86_branch_disp(inst,cond,disp,is_signed) \ + do { \ + int offset = (disp) - 2; \ + if (x86_is_imm8 ((offset))) \ + x86_branch8 ((inst), (cond), offset, (is_signed)); \ + else { \ + offset -= 4; \ + x86_branch32 ((inst), (cond), offset, (is_signed)); \ + } \ + } while (0) + +#define x86_call_imm(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe8; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_call_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst), 2, (reg)); \ + } while (0) + +#define x86_call_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 2, (mem)); \ + } while (0) + +#define x86_call_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } while (0) + +#define x86_call_code(inst,target) \ + do { \ + int offset = (target) - (inst); \ + offset -= 5 \ + x86_call_imm ((inst), offset); \ + } while (0) + +#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) + +#define x86_ret_imm(inst,imm) \ + do { \ + if ((imm) == 0) { \ + x86_ret ((inst)); \ + } else { \ + *(inst)++ = (unsigned char)0xc2; \ + x86_imm_emit16 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define 
x86_cmov_mem(inst,cond,is_signed,reg,mem) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_enter(inst,framesize) \ + do { \ + *(inst)++ = (unsigned char)0xc8; \ + x86_imm_emit16 ((inst), (framesize)); \ + *(inst)++ = 0; \ + } while (0) + +#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) +#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0) + +#define x86_fsin(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) +#define x86_fcos(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) +#define x86_fabs(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) +#define x86_fpatan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) +#define x86_fprem(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) +#define x86_fprem1(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0) +#define x86_frndint(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0) +#define x86_fsqrt(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0) +#define x86_fptan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0) + +#define x86_padding(inst,size) \ + do { \ + switch ((size)) { \ + case 1: x86_nop ((inst)); break; \ + case 2: *(inst)++ = 0x8b; \ + *(inst)++ = 0xc0; break; \ + case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \ + *(inst)++ = 0x00; break; \ + case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + break; \ + case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + x86_nop ((inst)); break; \ + case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + break; \ + case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; break; \ + default: assert (0); \ + } \ + } while (0) + +#define x86_prolog(inst,frame_size,reg_mask) \ + do { \ + unsigned i, m = 1; \ + x86_enter ((inst), (frame_size)); \ + for (i = 0; i < X86_NREG; ++i, m <<= 1) { \ + if ((reg_mask) & m) \ + x86_push_reg ((inst), i); \ + } \ + } while (0) + +#define x86_epilog(inst,reg_mask) \ + do { \ + unsigned i, m = 1 << X86_EDI; \ + for (i = X86_EDI; m != 0; i--, m=m>>1) { \ + if ((reg_mask) & m) \ + x86_pop_reg ((inst), i); \ + } \ + x86_leave ((inst)); \ + x86_ret ((inst)); \ + } while (0) + +#endif // X86_H -- cgit v1.1 From 5263eb4d219b8054b29a4d250cec40a7c8170a84 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Wed, 8 Aug 2001 16:48:32 +0000 Subject: Update copyright svn path=/trunk/mono/; revision=440 --- x86/x86-codegen.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index a699e5f..85a0373 100644 --- 
a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,6 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. + Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.1 2001/08/08 13:32:23 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.2 2001/08/08 16:48:32 miguel Exp $ */ #ifndef X86_H -- cgit v1.1 From 75cdbf5cd16480631ac8579c2c2f230761e4802b Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 8 Aug 2001 17:21:29 +0000 Subject: Fixed x86_mov_reg_imm(). svn path=/trunk/mono/; revision=441 --- x86/x86-codegen.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 85a0373..4cf568f 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.2 2001/08/08 16:48:32 miguel Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.3 2001/08/08 17:21:29 lupus Exp $ */ #ifndef X86_H @@ -685,6 +685,7 @@ x86_cc_signed_map [X86_NCC] = { x86_alu_reg_reg ((inst), X86_XOR, (reg), (reg)); \ } else { \ *(inst)++ = (unsigned char)0xb8 + (reg); \ + x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) -- cgit v1.1 From 231c25bd596aa45a2962a9c820fc9417985a1f3f Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 18 Aug 2001 06:55:29 +0000 Subject: Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit(). Sat Aug 18 12:42:26 CEST 2001 Paolo Molaro * class.c, class.h: load also the methods when loading a class. Sat Aug 18 12:43:38 CEST 2001 Paolo Molaro * interp.c, interp.h: added support code to create exceptions. Changed internal calling convention over to MonoInvocation, to support exceptions, walking the stack back and forward and passing the 'this' pointer separately (remove the kludges required before to pass this on the stack). Use alloca heavily for both local vars and a copy of the incoming arguments. Init local vars to zero. Simplify stackval_from_data() and stackval_to_data() to only take a pointer instead of pointer + offset. Implement a few exceptions-related opcodes and the code to run finally, fault and catch blocks as well as a stack trace if no handler is found. Sat Aug 18 12:51:28 CEST 2001 Paolo Molaro * metadata.c, metadata.h: in the signature and method header store only the space required for holding the local vars and incoming arguments. svn path=/trunk/mono/; revision=493 --- ChangeLog | 5 +++++ x86/test.c | 2 +- x86/x86-codegen.h | 43 +++++++++++++++++++++++++++++++------------ 3 files changed, 37 insertions(+), 13 deletions(-) diff --git a/ChangeLog b/ChangeLog index 57f43d2..31eecbf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit(). 
+ Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro * x86/x86-codegen.h, x86/test.c: added x86 code emitter with diff --git a/x86/test.c b/x86/test.c index 0ae82fe..231b7f6 100644 --- a/x86/test.c +++ b/x86/test.c @@ -202,7 +202,7 @@ int main() { x86_cmov_membase (p, X86_CC_GT, 0, X86_EAX, X86_EDX, -4); x86_nop (p); - x86_epilog (p, X86_CALLEE_REGS); + x86_epilog (p, X86_CALLER_REGS); size = p-code; for (i = 0; i < size; ++i) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 4cf568f..166030e 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.3 2001/08/08 17:21:29 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.4 2001/08/18 06:55:29 lupus Exp $ */ #ifndef X86_H @@ -39,7 +39,7 @@ typedef enum { // opcodes for shift instructions */ typedef enum { - X86_SHLD, + X86_SHLD, X86_SHLR, X86_SHL = 4, X86_SHR = 5, @@ -122,16 +122,21 @@ x86_cc_signed_map [X86_NCC] = { 0x7b, // np }; +typedef union { + int val; + unsigned char b [4]; +} x86_imm_buf; + /* // bitvector mask for callee-saved registers */ -#define X86_CALLEE_ESI_MASK (1<= -128 && (int)(imm) <= 127)) #define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1))) #define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0) +#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0) #define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) #define x86_membase_emit(inst,r,basereg,disp) do {\ @@ -668,7 +681,7 @@ x86_cc_signed_map [X86_NCC] = { x86_mem_emit ((inst), (reg), (mem)); \ } while (0) -#define x86_mov_reg_membase(inst,reg,regbase,disp,size) \ +#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ @@ -940,6 +953,12 @@ x86_cc_signed_map [X86_NCC] = { *(inst)++ = (unsigned char)0x50 + (reg); \ } while (0) +#define x86_push_regp(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_regp_emit ((inst), 6, (reg)); \ + } while (0) + #define x86_push_mem(inst,mem) \ do { \ *(inst)++ = (unsigned char)0xff; \ @@ -1076,10 +1095,10 @@ x86_cc_signed_map [X86_NCC] = { } \ } while (0) -#define x86_call_imm(inst,imm) \ +#define x86_call_imm(inst,disp) \ do { \ *(inst)++ = (unsigned char)0xe8; \ - x86_imm_emit32 ((inst), (imm)); \ + x86_imm_emit32 ((inst), (int)(disp)); \ } while (0) #define x86_call_reg(inst,reg) \ -- cgit v1.1 From d3a5cf739f1182a42d20f1d5ace2a272307da87f Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 27 Aug 2001 03:43:09 +0000 Subject: Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added. svn path=/trunk/mono/; revision=636 --- ChangeLog | 4 ++++ x86/test.c | 5 ++++- x86/x86-codegen.h | 17 ++++++++++++++--- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index 31eecbf..0116763 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added. + Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit(). 
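All of these emitter macros share one contract: each writes its bytes through the cursor passed as the first argument and advances it as a side effect, which is why test.c can emit a whole function as a flat sequence of macro calls. A minimal sketch of that usage pattern (the function and buffer here are illustrative, not from the patches):

#include "x86-codegen.h"

/* Emit "mov $42, %eax; ret" into a caller-provided buffer and
 * return the advanced cursor. */
static unsigned char *
emit_return_42 (unsigned char *p)
{
	x86_mov_reg_imm (p, X86_EAX, 42);	/* b8 2a 00 00 00 */
	x86_ret (p);				/* c3 */
	return p;				/* advanced 6 bytes past the start */
}
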
diff --git a/x86/test.c b/x86/test.c index 231b7f6..23e56f7 100644 --- a/x86/test.c +++ b/x86/test.c @@ -110,6 +110,9 @@ int main() { x86_mov_mem_reg (p, mem_addr, X86_EAX, 1); x86_mov_membase_reg (p, X86_EBP, 4, X86_EAX, 1); + x86_mov_regp_reg (p, X86_EAX, X86_EAX, 4); + x86_mov_membase_reg (p, X86_EAX, 0, X86_EAX, 4); + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 1); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 4); x86_mov_reg_mem (p, X86_EAX, mem_addr, 4); @@ -179,7 +182,7 @@ int main() { x86_branch (p, X86_CC_GT, target, 0); x86_branch_disp (p, X86_CC_NE, -4, 0); - x86_call_imm (p, printf); + x86_call_code (p, printf); x86_call_reg (p, X86_ECX); x86_sahf (p); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 166030e..f671cee 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.4 2001/08/18 06:55:29 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.5 2001/08/27 03:43:09 lupus Exp $ */ #ifndef X86_H @@ -648,6 +648,17 @@ typedef union { x86_mem_emit ((inst), (reg), (mem)); \ } while (0) +#define x86_mov_regp_reg(inst,regp,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_regp_emit ((inst), (reg), (regp)); \ + } while (0) + #define x86_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ switch ((size)) { \ @@ -1121,8 +1132,8 @@ typedef union { #define x86_call_code(inst,target) \ do { \ - int offset = (target) - (inst); \ - offset -= 5 \ + int offset = (unsigned char*)(target) - (inst); \ + offset -= 5; \ x86_call_imm ((inst), offset); \ } while (0) -- cgit v1.1 From 4c39a186f2fa0dc3cca3ae6f6dc6584c75341adf Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 6 Sep 2001 09:46:03 +0000 Subject: Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro * x86/x86-codegen.h: added x86_rdtsc() and fixes. * x86/tramp.c: create trampolines to call pinvoke methods. * x86/Makefile.am: create a libmonoarch convenience library. Thu Sep 6 15:41:24 CEST 2001 Paolo Molaro * Makefile.am: link to libmonoarch. * interp.h, interp.c: use mono_create_trampoline (). Pass the command line arguments to Main (String[]) methods. svn path=/trunk/mono/; revision=728 --- ChangeLog | 6 ++ Makefile.am | 3 + x86/Makefile.am | 6 ++ x86/tramp.c | 269 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ x86/x86-codegen.h | 10 +- 5 files changed, 292 insertions(+), 2 deletions(-) create mode 100644 Makefile.am create mode 100644 x86/Makefile.am create mode 100644 x86/tramp.c diff --git a/ChangeLog b/ChangeLog index 0116763..cbcda74 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,10 @@ +Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: added x86_rdtsc() and fixes. + * x86/tramp.c: create trampolines to call pinvoke methods. + * x86/Makefile.am: create a libmonoarch convenience library. + Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added. 
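The trampolines built in tramp.c below are plain cdecl functions, so after the standard push %ebp; mov %esp,%ebp prolog their four incoming arguments sit at fixed offsets from EBP; that is all the FUNC_ADDR_POS, RETVAL_POS, THIS_POS and ARGP_POS constants encode:

/*
 * [ebp+0]   saved EBP
 * [ebp+4]   return address
 * [ebp+8]   callme      (FUNC_ADDR_POS)
 * [ebp+12]  retval      (RETVAL_POS)
 * [ebp+16]  this_obj    (THIS_POS)
 * [ebp+20]  arguments   (ARGP_POS)
 */
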
diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..d76d6c7 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,3 @@ +# conditional compilation support here + +SUBDIRS = x86 diff --git a/x86/Makefile.am b/x86/Makefile.am new file mode 100644 index 0000000..2b4e0b1 --- /dev/null +++ b/x86/Makefile.am @@ -0,0 +1,6 @@ +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +lib_LIBRARIES = libmonoarch.a + +libmonoarch_a_SOURCES = tramp.c x86-codegen.h + diff --git a/x86/tramp.c b/x86/tramp.c new file mode 100644 index 0000000..e655c18 --- /dev/null +++ b/x86/tramp.c @@ -0,0 +1,269 @@ +/* + * Create trampolines to invoke arbitrary functions. + * + * Copyright (C) Ximian Inc. + * + * Author: Paolo Molaro (lupus@ximian.com) + * + */ + +#include "x86-codegen.h" +#include "mono/metadata/class.h" +#include "mono/interpreter/interp.h" + +/* + * The resulting function takes the form: + * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); + */ +#define FUNC_ADDR_POS 8 +#define RETVAL_POS 12 +#define THIS_POS 16 +#define ARGP_POS 20 +#define LOC_POS -4 + +#define ARG_SIZE sizeof (stackval) + +static char * +mono_get_ansi_string (MonoObject *o) +{ + MonoStringObject *s = (MonoStringObject *)o; + char *as, *vector; + int i; + + g_assert (o != NULL); + + if (!s->length) + return g_strdup (""); + + vector = s->c_str->vector; + + g_assert (vector != NULL); + + as = g_malloc (s->length + 1); + + /* fixme: replace with a real unicode/ansi conversion */ + for (i = 0; i < s->length; i++) { + as [i] = vector [i*2]; + } + + as [i] = '\0'; + + return as; +} + +MonoPIFunc +mono_create_trampoline (MonoMethod *method) +{ + MonoMethodSignature *sig; + unsigned char *p, *code_buffer; + guint32 local_size = 0, stack_size = 0, code_size = 30; + guint32 arg_pos; + int i, stringp; + + sig = method->signature; + + if (sig->hasthis) { + stack_size += sizeof (gpointer); + code_size += 5; + } + + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + stack_size += sizeof (gpointer); + code_size += i < 10 ? 5 : 8; + continue; + } + switch (sig->params [i]->type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_R4: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + stack_size += 4; + code_size += i < 10 ? 5 : 8; + break; + case MONO_TYPE_STRING: + stack_size += 4; + code_size += 20; + local_size++; + break; + case MONO_TYPE_I8: + stack_size += 8; + code_size += i < 10 ? 5 : 8; + break; + case MONO_TYPE_R8: + stack_size += 8; + code_size += i < 10 ? 7 : 10; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + /* + * FIXME: take into account large return values. + */ + + code_buffer = p = alloca (code_size); + + /* + * Standard function prolog. + */ + x86_push_reg (p, X86_EBP); + x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); + /* + * We store some local vars here to handle string pointers. + */ + if (local_size) + x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size * 4); + + /* + * We'll need to align to at least 8 bytes boudary... (16 may be better) + * x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); + */ + + /* + * EDX has the pointer to the args. + */ + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); + + /* + * Push arguments in reverse order. 
+ */ + stringp = 0; + for (i = sig->param_count; i; --i) { + arg_pos = ARG_SIZE * (i - 1); + if (sig->params [i - 1]->byref) { + x86_push_membase (p, X86_EDX, arg_pos); + continue; + } + switch (sig->params [i - 1]->type) { + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + x86_push_membase (p, X86_EDX, arg_pos); + break; + case MONO_TYPE_R8: + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 8); + x86_fld_membase (p, X86_EDX, arg_pos, TRUE); + x86_fst_membase (p, X86_ESP, 0, TRUE, TRUE); + break; + case MONO_TYPE_STRING: + /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ + x86_push_membase (p, X86_EDX, arg_pos); + x86_mov_reg_imm (p, X86_EDX, mono_get_ansi_string); + x86_call_reg (p, X86_EDX); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 4); + x86_push_reg (p, X86_EAX); + /* + * Store the pointer in a local we'll free later. + */ + stringp++; + x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); + /* + * we didn't save the reg: restore it here. + */ + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); + break; + case MONO_TYPE_I8: + x86_push_membase (p, X86_EDX, arg_pos + 4); + x86_push_membase (p, X86_EDX, arg_pos); + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + default: + g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); + } + } + + if (sig->hasthis) { + if (sig->call_convention != MONO_CALL_THISCALL) { + x86_mov_reg_membase (p, X86_EDX, X86_EBP, THIS_POS, 4); + x86_push_reg (p, X86_EDX); + } else { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, THIS_POS, 4); + } + } + + /* + * Insert call to function + */ + x86_mov_reg_membase (p, X86_EDX, X86_EBP, FUNC_ADDR_POS, 4); + x86_call_reg (p, X86_EDX); + + /* + * Handle retval. + * Small integer and pointer values are in EAX. + * Long integers are in EAX:EDX. + * FP values are on the FP stack. + */ + if (sig->ret->byref) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + } else { + switch (sig->ret->type) { + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + case MONO_TYPE_R4: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_fst_membase (p, X86_ECX, 0, FALSE, TRUE); + break; + case MONO_TYPE_R8: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_fst_membase (p, X86_ECX, 0, TRUE, TRUE); + break; + case MONO_TYPE_I8: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + x86_mov_membase_reg (p, X86_ECX, 4, X86_EDX, 4); + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + + /* + * free the allocated strings. + */ + if (local_size) + x86_mov_reg_imm (p, X86_EDX, g_free); + for (i = 1; i <= local_size; ++i) { + x86_push_membase (p, X86_EBP, LOC_POS * i); + x86_call_reg (p, X86_EDX); + } + /* + * Standard epilog. + */ + x86_leave (p); + x86_ret (p); + + return g_memdup (code_buffer, p - code_buffer); +} + diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index f671cee..d320f41 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. 
All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.5 2001/08/27 03:43:09 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.6 2001/09/06 09:46:03 lupus Exp $ */ #ifndef X86_H @@ -238,6 +238,12 @@ typedef union { #define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) +#define x86_rdtsc(inst) \ + do { \ + *(inst)++ = 0x0f; \ + *(inst)++ = 0x31; \ + } while (0) + #define x86_cmpxchg_reg_reg(inst,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x0f; \ @@ -719,7 +725,7 @@ typedef union { *(inst)++ = (unsigned char)0xc6; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit8 ((inst), (imm)); \ - } else if ((size) == 4) { \ + } else if ((size) == 2) { \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0xc7; \ x86_mem_emit ((inst), 0, (mem)); \ -- cgit v1.1 From 680963c46ae8b96cca52387e0f5b1a2e39825b90 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 7 Sep 2001 12:53:34 +0000 Subject: Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fixes and x86_mov_membase_imm (). * x86/tramp.c: implemented mono_create_method_pointer (): creates a native pointer to a method implementation that can be used as a normal C callback. Fri Sep 7 18:45:38 CEST 2001 Paolo Molaro * interp.c, interp.h: make ves_exec_method () and stackval_from_data () non static. Implement a couple of runtime methods needed to use delegates (ves_runtime_method ()). Implemented ldftn opcode. svn path=/trunk/mono/; revision=745 --- ChangeLog | 7 +++ x86/tramp.c | 180 +++++++++++++++++++++++++++++++++++++++++++++++++++--- x86/x86-codegen.h | 24 +++++++- 3 files changed, 198 insertions(+), 13 deletions(-) diff --git a/ChangeLog b/ChangeLog index cbcda74..4f1f55a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,11 @@ +Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fixes and x86_mov_membase_imm (). + * x86/tramp.c: implemented mono_create_method_pointer (): + creates a native pointer to a method implementation that can be + used as a normal C callback. + Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro * x86/x86-codegen.h: added x86_rdtsc() and fixes. diff --git a/x86/tramp.c b/x86/tramp.c index e655c18..5641a47 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -122,14 +122,15 @@ mono_create_trampoline (MonoMethod *method) x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); /* * We store some local vars here to handle string pointers. + * and align to 16 byte boundary... */ - if (local_size) + if (local_size) { x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size * 4); - - /* - * We'll need to align to at least 8 bytes boudary... (16 may be better) - * x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); - */ + stack_size = (stack_size * local_size * 4) % 16; + } else { + stack_size = stack_size % 16; + } + x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); /* * EDX has the pointer to the args. 
@@ -147,6 +148,10 @@ mono_create_trampoline (MonoMethod *method) continue; } switch (sig->params [i - 1]->type) { + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -186,10 +191,6 @@ mono_create_trampoline (MonoMethod *method) break; case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: default: g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); } @@ -226,6 +227,7 @@ mono_create_trampoline (MonoMethod *method) case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: /* this is going to cause large pains... */ x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; @@ -267,3 +269,161 @@ mono_create_trampoline (MonoMethod *method) return g_memdup (code_buffer, p - code_buffer); } +#define MINV_POS (- sizeof (MonoInvocation)) +#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) +#define OBJ_POS 8 +#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) + +/* + * Returns a pointer to a native function that can be used to + * call the specified method. + * The function created will receive the arguments according + * to the call convention specified in the method. + * This function works by creating a MonoInvocation structure, + * filling the fields in and calling ves_exec_method on it. + * Still need to figure out how to handle the exception stuff + * across the managed/unmanaged boundary. + */ +void * +mono_create_method_pointer (MonoMethod *method) +{ + MonoMethodSignature *sig; + unsigned char *p, *code_buffer; + gint32 local_size; + gint32 stackval_pos, arg_pos = 8; + int i; + + /* + * If it is a static P/Invoke method, we can just return the pointer + * to the method implementation. + */ + sig = method->signature; + + code_buffer = p = alloca (512); /* FIXME: check for overflows... */ + + local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1); + stackval_pos = -local_size; + + /* + * Standard function prolog with magic trick. + */ + x86_jump_code (p, code_buffer + 8); + *p++ = 'M'; + *p++ = 'o'; + *(void**)p = method; + p += 4; + x86_push_reg (p, X86_EBP); + x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size); + + /* + * Initialize MonoInvocation fields, first the ones known now. + */ + x86_mov_reg_imm (p, X86_EAX, 0); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), X86_EAX, 4); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), X86_EAX, 4); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), X86_EAX, 4); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), X86_EAX, 4); + /* + * Set the method pointer. + */ + x86_mov_membase_imm (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), (int)method, 4); + + /* + * Handle this. + */ + if (sig->hasthis) { + if (sig->call_convention != MONO_CALL_THISCALL) { + /* + * Grab it from the stack, otherwise it's already in ECX. + */ + x86_mov_reg_membase (p, X86_ECX, X86_EBP, OBJ_POS, 4); + arg_pos += 4; + } + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), X86_ECX, 4); + } + /* + * Handle the arguments. stackval_pos is the offset of the stackval array from EBP.
+ * arg_pos is the offset from EBP to the incoming arg on the stack. + * We just call stackval_from_data to handle all the (nasty) issues.... + */ + for (i = 0; i < sig->param_count; ++i) { + x86_mov_reg_imm (p, X86_ECX, stackval_from_data); + x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); + x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + x86_push_reg (p, X86_EDX); + x86_push_reg (p, X86_EAX); + x86_push_imm (p, sig->params [i]); + x86_call_reg (p, X86_ECX); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 12); + stackval_pos += sizeof (stackval); + arg_pos += 4; + if (!sig->params [i]->byref) { + switch (sig->params [i]->type) { + case MONO_TYPE_I8: + case MONO_TYPE_R8: + arg_pos += 4; + break; + case MONO_TYPE_VALUETYPE: + g_assert_not_reached (); /* Not implemented yet. */ + default: + break; + } + } + } + + /* + * Handle the return value storage area. + */ + x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)), X86_EAX, 4); + + /* + * Call the method. + */ + x86_lea_membase (p, X86_EAX, X86_EBP, MINV_POS); + x86_push_reg (p, X86_EAX); + x86_mov_reg_imm (p, X86_EDX, ves_exec_method); + x86_call_reg (p, X86_EDX); + + /* + * Move the return value to the proper place. + */ + x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + if (sig->ret->byref) { + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); + } else { + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); + break; + case MONO_TYPE_I8: + x86_mov_reg_membase (p, X86_EDX, X86_EAX, 4, 4); + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); + break; + case MONO_TYPE_R8: + x86_fld_membase (p, X86_EAX, 0, TRUE); + break; + default: + g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); + break; + } + } + + /* + * Standard epilog. + */ + x86_leave (p); + x86_ret (p); + + return g_memdup (code_buffer, p - code_buffer); +} + + diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index d320f41..4051c7f 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.6 2001/09/06 09:46:03 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.7 2001/09/07 12:53:34 lupus Exp $ */ #ifndef X86_H @@ -737,6 +737,24 @@ typedef union { } \ } while (0) +#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 2) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + #define x86_lea_mem(inst,reg,mem) \ do { \ *(inst)++ = (unsigned char)0x8d; \ @@ -766,7 +784,7 @@ typedef union { if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ *(inst)++ = op; \ - x86_mem_emit ((inst), (reg), (mem)); \ + x86_mem_emit ((inst), (dreg), (mem)); \ } while (0) #define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \ @@ -776,7 +794,7 @@ typedef union { if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ *(inst)++ = op; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ } while (0) #define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0) -- cgit v1.1 From 6c07667b555ca78bdad5d7b6e5aa87f8078c1989 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 10 Sep 2001 09:14:46 +0000 Subject: added the jit prototype, small fixes svn path=/trunk/mono/; revision=780 --- Makefile.am | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile.am b/Makefile.am index d76d6c7..f04bb67 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,3 +1,4 @@ # conditional compilation support here SUBDIRS = x86 + -- cgit v1.1 From ce34fcec9c53a31ba2cd48f22c9a5099d02779e5 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 10 Sep 2001 09:34:11 +0000 Subject: *** empty log message *** svn path=/trunk/mono/; revision=781 --- .cvsignore | 2 ++ x86/.cvsignore | 4 ++++ 2 files changed, 6 insertions(+) create mode 100644 .cvsignore create mode 100644 x86/.cvsignore diff --git a/.cvsignore b/.cvsignore new file mode 100644 index 0000000..c038ed7 --- /dev/null +++ b/.cvsignore @@ -0,0 +1,2 @@ +Makefile +Makefile.in \ No newline at end of file diff --git a/x86/.cvsignore b/x86/.cvsignore new file mode 100644 index 0000000..e9407c9 --- /dev/null +++ b/x86/.cvsignore @@ -0,0 +1,4 @@ +Makefile +Makefile.in +.libs +.deps -- cgit v1.1 From db78bf2c09f07356fe4c8284d1a48fa9867bd2fc Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 10 Sep 2001 14:26:02 +0000 Subject: Mon Sep 10 20:19:00 CEST 2001 Paolo Molaro * configure.in: check for sizeof(void*) and for the architecture. Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro * Makefile.am, x86/Makefile.am: conditional compile logic to make porting to different targets easier. Mon Sep 10 17:24:45 CEST 2001 Paolo Molaro * Makefile.am: make it work for make distcheck. Mon Sep 10 20:21:34 CEST 2001 Paolo Molaro * endian.h, assembly.c: fix some endianness issues. Mon Sep 10 20:20:36 CEST 2001 Paolo Molaro * interp.c: endian fixes, comments. 
svn path=/trunk/mono/; revision=783 --- ChangeLog | 5 +++++ Makefile.am | 15 +++++++++++++-- unknown.c | 19 +++++++++++++++++++ x86/Makefile.am | 5 +++-- 4 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 unknown.c diff --git a/ChangeLog b/ChangeLog index 4f1f55a..6211679 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro + + * Makefile.am, x86/Makefile.am: conditional compile logic + to make porting to different targets easier. + Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro * x86/x86-codegen.h: fixes and x86_mov_membase_imm (). diff --git a/Makefile.am b/Makefile.am index f04bb67..c9e91be 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,15 @@ -# conditional compilation support here +SUBDIRS = $(arch_target) +DIST_SUBDIRS = x86 -SUBDIRS = x86 +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch.la + +libmonoarch_la_SOURCES = unknown.c + +if X86 +libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la +endif + +EXTRA_DIST = ChangeLog diff --git a/unknown.c b/unknown.c new file mode 100644 index 0000000..d02edca --- /dev/null +++ b/unknown.c @@ -0,0 +1,19 @@ +#ifdef NO_PORT +#include "mono/interpreter/interp.h" + +MonoPIFunc +mono_create_trampoline (MonoMethod *method) +{ + g_error ("Unsupported arch"); + return NULL; +} + +void * +mono_create_method_pointer (MonoMethod *method) +{ + g_error ("Unsupported arch"); + return NULL; +} + +#endif + diff --git a/x86/Makefile.am b/x86/Makefile.am index 2b4e0b1..8d809b8 100644 --- a/x86/Makefile.am +++ b/x86/Makefile.am @@ -1,6 +1,7 @@ + INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -lib_LIBRARIES = libmonoarch.a +noinst_LTLIBRARIES = libmonoarch-x86.la -libmonoarch_a_SOURCES = tramp.c x86-codegen.h +libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h -- cgit v1.1 From c61474703f058c226a94ba9cdfb1d19e3a45eecd Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 12 Sep 2001 03:47:43 +0000 Subject: *** empty log message *** svn path=/trunk/mono/; revision=792 --- .cvsignore | 6 +++++- x86/.cvsignore | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.cvsignore b/.cvsignore index c038ed7..0b27fc3 100644 --- a/.cvsignore +++ b/.cvsignore @@ -1,2 +1,6 @@ Makefile -Makefile.in \ No newline at end of file +Makefile.in +.deps +.libs +*.la +*.lo \ No newline at end of file diff --git a/x86/.cvsignore b/x86/.cvsignore index e9407c9..e9793ab 100644 --- a/x86/.cvsignore +++ b/x86/.cvsignore @@ -2,3 +2,5 @@ Makefile Makefile.in .libs .deps +*.la +*.lo -- cgit v1.1 From 011e42b68518f5c1397ecdc0417c021b4c524560 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 17 Sep 2001 07:18:11 +0000 Subject: 2001-09-17 Dietmar Maurer * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest svn path=/trunk/mono/; revision=841 --- x86/x86-codegen.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 4051c7f..1da83d2 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.7 2001/09/07 12:53:34 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.8 2001/09/17 07:18:11 dietmar Exp $ */ #ifndef X86_H @@ -417,7 +417,7 @@ typedef union { #define x86_alu_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_reg_emit ((inst), (reg), (dreg)); \ + x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) #define x86_alu_reg_mem(inst,opc,reg,mem) \ -- cgit v1.1 From 4f874ee6ae2442c99421087b5ad11eae88283d55 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 17 Sep 2001 09:10:44 +0000 Subject: 2001-09-17 Dietmar Maurer * x86.brg: emit real code for calls * testjit.c (create_jit_trampoline): creates a function to trigger jit compilation. (mono_compile_method): reversed argument order svn path=/trunk/mono/; revision=842 --- ChangeLog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ChangeLog b/ChangeLog index 6211679..a250d36 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2001-09-17 Dietmar Maurer + + * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro -- cgit v1.1 From e177e60b93378860f0573f458d06cd641770a255 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 18 Sep 2001 07:26:43 +0000 Subject: Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro * x86/x86-codegen.h: remove C++ comments. svn path=/trunk/mono/; revision=865 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 42 +++++++++++++++++++++--------------------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/ChangeLog b/ChangeLog index a250d36..42f55de 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: remove C++ comments. + 2001-09-17 Dietmar Maurer * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 1da83d2..6893eb0 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.8 2001/09/17 07:18:11 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.9 2001/09/18 07:26:43 lupus Exp $ */ #ifndef X86_H @@ -96,30 +96,30 @@ typedef enum { static const unsigned char x86_cc_unsigned_map [X86_NCC] = { - 0x74, // eq - 0x75, // ne - 0x72, // lt - 0x76, // le - 0x77, // gt - 0x73, // ge - 0x78, // lz - 0x79, // gez - 0x7a, // p - 0x7b, // np + 0x74, /* eq */ + 0x75, /* ne */ + 0x72, /* lt */ + 0x76, /* le */ + 0x77, /* gt */ + 0x73, /* ge */ + 0x78, /* lz */ + 0x79, /* gez */ + 0x7a, /* p */ + 0x7b, /* np */ }; static const unsigned char x86_cc_signed_map [X86_NCC] = { - 0x74, // eq - 0x75, // ne - 0x7c, // lt - 0x7e, // le - 0x7f, // gt - 0x7d, // ge - 0x78, // lz - 0x79, // gez - 0x7a, // p - 0x7b, // np + 0x74, /* eq */ + 0x75, /* ne */ + 0x7c, /* lt */ + 0x7e, /* le */ + 0x7f, /* gt */ + 0x7d, /* ge */ + 0x78, /* lz */ + 0x79, /* gez */ + 0x7a, /* p */ + 0x7b, /* np */ }; typedef union { -- cgit v1.1 From a0930b7dcd7fe845e1c3c06f3fba6736f88d8bf9 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 20 Sep 2001 15:31:50 +0000 Subject: Thu Sep 20 16:32:42 CEST 2001 Paolo Molaro * interp.c: implemented some more opcodes: calli, rem.un, shr.un, conv.u, cpobj, stobj, conv.r.un, conv.ovf.i1.un, conv.ovf.i2.un, conv.ovf.i4.un, conv.ovf.i8.un, conv.ovf.i.un, conv.ovf.u1.un, conv.ovf.u2.un, conv.ovf.u4.un, conv.ovf.u8.un, conv.ovf.u.un. Fix some 64 bit issues in the array element access code and a small bug. Throw an exception on index out of range instead of asserting. Throw an exception on a NULL array instead of dying. Stomped a memory corruption bug (.cctor methods were freed after executing them, but they are stores in MonoClass now...). Added a simple facility to invoke the debugger when a named function is entered (use the cmdline option --debug method_name). * interp.h: fix 64 bit issue. svn path=/trunk/mono/; revision=904 --- x86/tramp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/x86/tramp.c b/x86/tramp.c index 5641a47..9aea6f7 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -7,6 +7,7 @@ * */ +#include "config.h" #include "x86-codegen.h" #include "mono/metadata/class.h" #include "mono/interpreter/interp.h" -- cgit v1.1 From c9d21b14c718c8e7f3690f5d93ac349bbdd98d88 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 21 Sep 2001 12:50:46 +0000 Subject: implemented more opcodes svn path=/trunk/mono/; revision=916 --- ChangeLog | 3 +++ x86/x86-codegen.h | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 42f55de..b34721a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2001-09-21 Dietmar Maurer + + * x86/x86-codegen.h (x86_breakpoint): added. Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6893eb0..44a240e 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.9 2001/09/18 07:26:43 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.10 2001/09/21 12:50:46 dietmar Exp $ */ #ifndef X86_H @@ -236,6 +236,11 @@ typedef union { * TODO: memindex_emit */ +#define x86_breakpoint(inst) \ + do { \ + *(inst)++ = 0xcc; \ + } while (0) + #define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) #define x86_rdtsc(inst) \ -- cgit v1.1 From a995bd527db97e45d979a6b97e0a15a479d2e14b Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sun, 23 Sep 2001 07:49:26 +0000 Subject: Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines. svn path=/trunk/mono/; revision=927 --- ChangeLog | 5 +++++ x86/tramp.c | 2 ++ 2 files changed, 7 insertions(+) diff --git a/ChangeLog b/ChangeLog index b34721a..6c2b910 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro + + * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines. + 2001-09-21 Dietmar Maurer * x86/x86-codegen.h (x86_breakpoint): added. diff --git a/x86/tramp.c b/x86/tramp.c index 9aea6f7..72a5ec8 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -227,6 +227,7 @@ mono_create_trampoline (MonoMethod *method) case MONO_TYPE_U4: case MONO_TYPE_I: case MONO_TYPE_U: + case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: /* this is going to cause large pains... */ x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); @@ -403,6 +404,7 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); break; case MONO_TYPE_I8: -- cgit v1.1 From 1f45df6d593cd60780ea121d08ddd035a3418e4a Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 24 Sep 2001 13:30:32 +0000 Subject: Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro * x86/tramp.c: don't change a MONO_TYPE_STRING to a char* when it's an argument to an internalcall. Mon Sep 24 18:56:59 CEST 2001 Paolo Molaro * object.c, object.h: added mono_ldstr (), mono_string_is_interned () and mono_string_intern () to implement the semantics of the ldstr opcode and the interning of System.Strings. * icall.c: provide hooks to make String::IsIntern and String::Intern internalcalls. Mon Sep 24 18:50:25 CEST 2001 Paolo Molaro * interp.c: catch a few more error conditions with exceptions instead of erroring out. Don't use g_print() in stack traces because it doesn't work with some float values. When we call an instance method of a valuetype class, unbox the 'this' argument if it is an object. Use mono_ldstr () to implement the ldstr opcode: it takes care of interning the string if necessary. Implemented new opcodes: ckfinite, cgt.un, clt.un, ldvirtftn, ldarga. Fixes to handle NaNs when comparing doubles. Make sure the loaded assembly has an entry point defined. Fixed portability bugs in neg and not opcodes. svn path=/trunk/mono/; revision=943 --- ChangeLog | 5 +++++ x86/tramp.c | 25 ++++++++++++++++++------- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/ChangeLog b/ChangeLog index 6c2b910..c428a39 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro + + * x86/tramp.c: don't change a MONO_TYPE_STRING to a char* + when it's an argument to an internalcall. + Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines. 
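/* Editorial sketch, assuming only the macros above: every emitter macro
 * appends the instruction's encoding to a byte buffer and advances the
 * cursor p, so x86_breakpoint () can plant an int3 in generated code.
 * The buffer size and the surrounding instructions are illustrative. */
unsigned char code [32];
unsigned char *p = code;

x86_breakpoint (p);                              /* 0xcc: int3 trap            */
x86_alu_reg_reg (p, X86_ADD, X86_EAX, X86_EDX);  /* 0x03 0xc2: add %edx,%eax   */
x86_ret (p);                                     /* 0xc3                       */
/* p - code is now 4: the number of bytes emitted */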
diff --git a/x86/tramp.c b/x86/tramp.c index 72a5ec8..26bc97f 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -10,6 +10,7 @@ #include "config.h" #include "x86-codegen.h" #include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" /* @@ -149,10 +150,12 @@ mono_create_trampoline (MonoMethod *method) continue; } switch (sig->params [i - 1]->type) { + case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_I2: case MONO_TYPE_U2: + case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -170,6 +173,14 @@ mono_create_trampoline (MonoMethod *method) x86_fst_membase (p, X86_ESP, 0, TRUE, TRUE); break; case MONO_TYPE_STRING: + /* + * If it is an internalcall we assume it's the object we want. + * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. + */ + if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { + x86_push_membase (p, X86_EDX, arg_pos); + break; + } /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ x86_push_membase (p, X86_EDX, arg_pos); x86_mov_reg_imm (p, X86_EDX, mono_get_ansi_string); @@ -190,8 +201,6 @@ mono_create_trampoline (MonoMethod *method) x86_push_membase (p, X86_EDX, arg_pos + 4); x86_push_membase (p, X86_EDX, arg_pos); break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: default: g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); } @@ -256,11 +265,13 @@ mono_create_trampoline (MonoMethod *method) /* * free the allocated strings. */ - if (local_size) - x86_mov_reg_imm (p, X86_EDX, g_free); - for (i = 1; i <= local_size; ++i) { - x86_push_membase (p, X86_EBP, LOC_POS * i); - x86_call_reg (p, X86_EDX); + if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { + if (local_size) + x86_mov_reg_imm (p, X86_EDX, g_free); + for (i = 1; i <= local_size; ++i) { + x86_push_membase (p, X86_EBP, LOC_POS * i); + x86_call_reg (p, X86_EDX); + } } /* * Standard epilog. -- cgit v1.1 From a5844f903a68e9448d7031587ffbd02ed2c4f486 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 26 Sep 2001 10:33:18 +0000 Subject: Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro * x86/x86-codegen.h: added memindex addressing mode encoding (and mov to/from register opcodes). svn path=/trunk/mono/; revision=984 --- ChangeLog | 5 +++++ x86/test.c | 2 ++ x86/x86-codegen.h | 42 ++++++++++++++++++++++++++++++++++++++---- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index c428a39..b977214 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: added memindex addressing mode encoding + (and mov to/from register opcodes). + Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro * x86/tramp.c: don't change a MONO_TYPE_STRING to a char* diff --git a/x86/test.c b/x86/test.c index 23e56f7..32e3f7a 100644 --- a/x86/test.c +++ b/x86/test.c @@ -113,6 +113,8 @@ int main() { x86_mov_regp_reg (p, X86_EAX, X86_EAX, 4); x86_mov_membase_reg (p, X86_EAX, 0, X86_EAX, 4); x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); + x86_mov_reg_memindex (p, X86_ECX, X86_EAX, 34, X86_EDX, 2, 4); + x86_mov_memindex_reg (p, X86_EAX, X86_EAX, 0, X86_EDX, 2, 4); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 1); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 4); x86_mov_reg_mem (p, X86_EAX, mem_addr, 4); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 44a240e..ee5d028 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. 
Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.10 2001/09/21 12:50:46 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.11 2001/09/26 10:33:18 lupus Exp $ */ #ifndef X86_H @@ -232,9 +232,21 @@ typedef union { } \ } while (0) -/* - * TODO: memindex_emit - */ +#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ + do { \ + if ((disp) == 0 && (basereg) != X86_EBP) { \ + x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + } else if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + } while (0) #define x86_breakpoint(inst) \ do { \ @@ -681,6 +693,17 @@ typedef union { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + #define x86_mov_reg_reg(inst,dreg,reg,size) \ do { \ switch ((size)) { \ @@ -714,6 +737,17 @@ typedef union { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + #define x86_mov_reg_imm(inst,reg,imm) \ do { \ if ((imm) == 0) { \ -- cgit v1.1 From 0122a3ea04b06d1d51f2756e48f6392ccac1096d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 27 Sep 2001 09:38:19 +0000 Subject: Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG as basereg. svn path=/trunk/mono/; revision=995 --- ChangeLog | 5 +++++ x86/test.c | 1 + x86/x86-codegen.h | 14 ++++++++++---- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index b977214..a347cf1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG + as basereg. 
+ Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro * x86/x86-codegen.h: added memindex addressing mode encoding diff --git a/x86/test.c b/x86/test.c index 32e3f7a..37eddb5 100644 --- a/x86/test.c +++ b/x86/test.c @@ -114,6 +114,7 @@ int main() { x86_mov_membase_reg (p, X86_EAX, 0, X86_EAX, 4); x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); x86_mov_reg_memindex (p, X86_ECX, X86_EAX, 34, X86_EDX, 2, 4); + x86_mov_reg_memindex (p, X86_ECX, X86_NOBASEREG, 34, X86_EDX, 2, 4); x86_mov_memindex_reg (p, X86_EAX, X86_EAX, 0, X86_EDX, 2, 4); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 1); x86_mov_reg_reg (p, X86_EAX, X86_EAX, 4); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ee5d028..3c5ce44 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.11 2001/09/26 10:33:18 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.12 2001/09/27 09:38:19 lupus Exp $ */ #ifndef X86_H @@ -127,6 +127,8 @@ typedef union { unsigned char b [4]; } x86_imm_buf; +#define X86_NOBASEREG (-1) + /* // bitvector mask for callee-saved registers */ @@ -234,7 +236,11 @@ typedef union { #define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ do { \ - if ((disp) == 0 && (basereg) != X86_EBP) { \ + if ((basereg) == X86_NOBASEREG) { \ + x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } else if ((disp) == 0 && (basereg) != X86_EBP) { \ x86_address_byte ((inst), 0, (r), 4); \ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ } else if (x86_is_imm8((disp))) { \ @@ -242,8 +248,8 @@ typedef union { x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ x86_imm_emit8 ((inst), (disp)); \ } else { \ - x86_address_byte ((inst), 2, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), 5); \ x86_imm_emit32 ((inst), (disp)); \ } \ } while (0) -- cgit v1.1 From 1fa26f9aa718559d3090d1c1275bf04d574368f0 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 28 Sep 2001 13:49:47 +0000 Subject: Fri Sep 28 19:26:30 CEST 2001 Paolo Molaro * metadata.c: fix type comparison for arrays. * loader.h, loader.c: half-assed fix to get more tests work in cygwin. Added a couple of new classes to monodefaults. * icall.c: added a couple of Reflection-related internalcalls. * class.h, class.c: implemented mono_ldtoken () for RuntimeTypeHandles. Added a byval_arg MonoType to MonoClass. Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro * x86/tramp.c: marshal valuetypes that are enums. Fri Sep 28 19:37:46 CEST 2001 Paolo Molaro * interp.c: Implemented ldtoken, conv.ovf.i. Use MonoClass->byval_arg (and remove related kludges). Don't choke on access to arrays of references. Throw an exception when an internalcall or P/Invoke function don't have an implementation. Throw and EngineException for unimplemented opcodes. svn path=/trunk/mono/; revision=1027 --- ChangeLog | 4 ++++ x86/tramp.c | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index a347cf1..2a60ede 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro + + * x86/tramp.c: marshal valuetypes that are enums. 
+ Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG diff --git a/x86/tramp.c b/x86/tramp.c index 26bc97f..00bbeb4 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -94,6 +94,12 @@ mono_create_trampoline (MonoMethod *method) stack_size += 4; code_size += i < 10 ? 5 : 8; break; + case MONO_TYPE_VALUETYPE: + if (!sig->params [i]->data.klass->enumtype) + g_error ("can only marshal enums, not generic structures"); + stack_size += 4; + code_size += i < 10 ? 5 : 8; + break; case MONO_TYPE_STRING: stack_size += 4; code_size += 20; @@ -165,6 +171,7 @@ mono_create_trampoline (MonoMethod *method) case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_R4: + case MONO_TYPE_VALUETYPE: /* only enums supported right now, anyway */ x86_push_membase (p, X86_EDX, arg_pos); break; case MONO_TYPE_R8: -- cgit v1.1 From 7328e9088acbd2609dff8d07b841c3fafd894d25 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 1 Oct 2001 13:07:53 +0000 Subject: Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment and avoid a couple of unnecessary instructions. svn path=/trunk/mono/; revision=1042 --- ChangeLog | 5 +++++ x86/tramp.c | 8 +++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 2a60ede..15f623f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro + + * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment + and avoid a couple of unnecessary instructions. + Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro * x86/tramp.c: marshal valuetypes that are enums. diff --git a/x86/tramp.c b/x86/tramp.c index 00bbeb4..ec83c92 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -138,7 +138,8 @@ mono_create_trampoline (MonoMethod *method) } else { stack_size = stack_size % 16; } - x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); + if (stack_size) + x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); /* * EDX has the pointer to the args. @@ -192,7 +193,7 @@ mono_create_trampoline (MonoMethod *method) x86_push_membase (p, X86_EDX, arg_pos); x86_mov_reg_imm (p, X86_EDX, mono_get_ansi_string); x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 4); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); x86_push_reg (p, X86_EAX); /* * Store the pointer in a local we'll free later. @@ -202,7 +203,8 @@ mono_create_trampoline (MonoMethod *method) /* * we didn't save the reg: restore it here. */ - x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); + if (i > 1) + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); break; case MONO_TYPE_I8: x86_push_membase (p, X86_EDX, arg_pos + 4); -- cgit v1.1 From 4ff31b89c4d3458dc378cd2e915ed08281a21a8b Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 4 Oct 2001 13:32:23 +0000 Subject: Thu Oct 4 19:10:30 CEST 2001 Paolo Molaro * class.c: MonoTypes stored in MonoClass are stored as fundamental MonoTypes when the class represents a fundamental type (System.Int32, ...). The TypeHandle return by ldtoken is a MonoType*. * icall.c: ves_icall_get_data_chunk () write out all the PE/COFF stuff. Implement ves_icall_define_method (), ves_icall_set_method_body (), ves_icall_type_from_handle (). * image.c: properly skip unknown streams. * loader.h, loader.c: add type_class to mono_defaults. * metadata.c, metadata.h: export compute_size () as mono_metadata_compute_size () with a better interface. Typo and C&P fixes. * pedump.c: don't try to print the entry point RVA if there is no entry point. 
* reflection.c, reflection.h: many cleanups, fixes, output method signatures and headers, typedef and typeref info, compress the metadata tables, output all the heap streams, cli header etc. * row-indexes.h: typo fixes. Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro * x86/tramp.c: allow marshalling valuetypes if they are 4 bytes long. Thu Oct 4 19:05:56 CEST 2001 Paolo Molaro * dis-cil.c: fix printing of exception stuff. * dump.c: display some more info in the typedef table dump. * main.c: typo fix and method list fix. svn path=/trunk/mono/; revision=1071 --- ChangeLog | 5 +++++ x86/tramp.c | 15 ++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 15f623f..43bb4a9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro + + * x86/tramp.c: allow marshalling valuetypes if they are + 4 bytes long. + Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment diff --git a/x86/tramp.c b/x86/tramp.c index ec83c92..38ce31f 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -95,8 +95,8 @@ mono_create_trampoline (MonoMethod *method) code_size += i < 10 ? 5 : 8; break; case MONO_TYPE_VALUETYPE: - if (!sig->params [i]->data.klass->enumtype) - g_error ("can only marshal enums, not generic structures"); + if (!sig->params [i]->data.klass->enumtype && (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4)) + g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); stack_size += 4; code_size += i < 10 ? 5 : 8; break; @@ -172,9 +172,18 @@ mono_create_trampoline (MonoMethod *method) case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_R4: - case MONO_TYPE_VALUETYPE: /* only enums supported right now, anyway */ x86_push_membase (p, X86_EDX, arg_pos); break; + case MONO_TYPE_VALUETYPE: + if (!sig->params [i - 1]->data.klass->enumtype) { + /* it's a structure that fits in 4 bytes, need to push the value pointed to */ + x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_push_regp (p, X86_EAX); + } else { + /* it's an enum value */ + x86_push_membase (p, X86_EDX, arg_pos); + } + break; case MONO_TYPE_R8: x86_alu_reg_imm (p, X86_SUB, X86_ESP, 8); x86_fld_membase (p, X86_EDX, arg_pos, TRUE); -- cgit v1.1 From 27043fee95be8bec691045d7ab39b1be553550e9 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 8 Oct 2001 14:33:48 +0000 Subject: Mon Oct 8 20:27:50 CEST 2001 Paolo Molaro * configure.in: define NO_UNALIGNED_ACCESS for platforms that can't read on unaligned boundaries Mon Oct 8 16:12:38 CEST 2001 Paolo Molaro * metadata.c, metadata.h: use MonoArrayType to describe the shape of an array. Guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux). * image.c: endian fixes by Laurent Rioux. * object.h, object.c: rename MonoStringObject to MonoString and MonoArrayObject to MonoArray. Change some function names to conform to the style mono__. mono_string_new_utf16 () takes a guint16* as first argument, so don't use char*. Provide macros to do the interesting things on arrays in a portable way. * threads-pthread.c: updates for the API changes and #include (required for sched_yield()). * icall.c: updates for the API changes above. * Makefile.am, mono-endian.c. mono-endian.h: include unaligned read routines for platforms that need them. Mon Oct 8 16:13:55 CEST 2001 Paolo Molaro * get.c, get.h: MonoArray changed in MonoArrayType. 
* main.c: guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux). Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro * x86/tramp.c: remove mono_get_ansi_string () and use mono_string_to_utf8 () instead. Mon Oct 8 16:14:40 CEST 2001 Paolo Molaro * interp.c: use the accessors provided in object.h to deal with MonoArrays. Updates for API renames in metadata. Throw exception in ldelema if index is out of bounds. svn path=/trunk/mono/; revision=1122 --- ChangeLog | 5 +++++ x86/tramp.c | 30 +----------------------------- 2 files changed, 6 insertions(+), 29 deletions(-) diff --git a/ChangeLog b/ChangeLog index 43bb4a9..4e4a7e3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro + + * x86/tramp.c: remove mono_get_ansi_string () and use + mono_string_to_utf8 () instead. + Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro * x86/tramp.c: allow marshalling valuetypes if they are diff --git a/x86/tramp.c b/x86/tramp.c index 38ce31f..5b6de5b 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -25,34 +25,6 @@ #define ARG_SIZE sizeof (stackval) -static char * -mono_get_ansi_string (MonoObject *o) -{ - MonoStringObject *s = (MonoStringObject *)o; - char *as, *vector; - int i; - - g_assert (o != NULL); - - if (!s->length) - return g_strdup (""); - - vector = s->c_str->vector; - - g_assert (vector != NULL); - - as = g_malloc (s->length + 1); - - /* fixme: replace with a real unicode/ansi conversion */ - for (i = 0; i < s->length; i++) { - as [i] = vector [i*2]; - } - - as [i] = '\0'; - - return as; -} - MonoPIFunc mono_create_trampoline (MonoMethod *method) { @@ -200,7 +172,7 @@ mono_create_trampoline (MonoMethod *method) } /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ x86_push_membase (p, X86_EDX, arg_pos); - x86_mov_reg_imm (p, X86_EDX, mono_get_ansi_string); + x86_mov_reg_imm (p, X86_EDX, mono_string_to_utf8); x86_call_reg (p, X86_EDX); x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); x86_push_reg (p, X86_EAX); -- cgit v1.1 From f6b50c3852378ca35cef63056ddec70585b3ac32 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 10 Oct 2001 10:11:17 +0000 Subject: Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro * x86/x86-codegen.c: added x86_set_{reg,mem,membase}. svn path=/trunk/mono/; revision=1133 --- ChangeLog | 4 ++++ x86/test.c | 3 +++ x86/x86-codegen.h | 32 +++++++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 4e4a7e3..57edae5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro + + * x86/x86-codegen.c: added x86_set_{reg,mem,membase}. + Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro * x86/tramp.c: remove mono_get_ansi_string () and use diff --git a/x86/test.c b/x86/test.c index 37eddb5..45fb578 100644 --- a/x86/test.c +++ b/x86/test.c @@ -185,6 +185,9 @@ int main() { x86_branch (p, X86_CC_GT, target, 0); x86_branch_disp (p, X86_CC_NE, -4, 0); + x86_set_reg (p, X86_CC_EQ, X86_EAX, 0); + x86_set_membase (p, X86_CC_LE, X86_EBP, -8, 0); + x86_call_code (p, printf); x86_call_reg (p, X86_ECX); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 3c5ce44..37006c7 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.12 2001/09/27 09:38:19 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.13 2001/10/10 10:11:17 lupus Exp $ */ #ifndef X86_H @@ -1175,6 +1175,36 @@ typedef union { } \ } while (0) +#define x86_set_reg(inst,cond,reg,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_reg_emit ((inst), 0, (reg)); \ + } while (0) + +#define x86_set_mem(inst,cond,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_set_membase(inst,cond,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + #define x86_call_imm(inst,disp) \ do { \ *(inst)++ = (unsigned char)0xe8; \ -- cgit v1.1 From 689da148c801d119d0d2722ef74a497e95c5f1b3 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 22 Oct 2001 09:24:31 +0000 Subject: Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro * x86/tramp.c: handle boolean, u1 and i1 as return values. svn path=/trunk/mono/; revision=1192 --- ChangeLog | 4 ++++ x86/tramp.c | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index 57edae5..c6df60a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro + + * x86/tramp.c: handle boolean, u1 and i1 as return values. + Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro * x86/x86-codegen.c: added x86_set_{reg,mem,membase}. diff --git a/x86/tramp.c b/x86/tramp.c index 5b6de5b..90bcb90 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -222,6 +222,12 @@ mono_create_trampoline (MonoMethod *method) x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); } else { switch (sig->ret->type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 1); + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: -- cgit v1.1 From 306ec85b780f5f9c99ffaf19f51baa6548a298a6 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 7 Nov 2001 06:33:48 +0000 Subject: 2001-11-07 Dietmar Maurer * emit-x86.c (enter_method): print out all method arguments (x86_magic_trampoline): impl. (arch_create_simple_jit_trampoline): we use different trampolines for static methods (no need to write the address back into to vtable). svn path=/trunk/mono/; revision=1278 --- x86/x86-codegen.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 37006c7..6a182d1 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.13 2001/10/10 10:11:17 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.14 2001/11/07 06:33:48 dietmar Exp $ */ #ifndef X86_H @@ -1114,7 +1114,7 @@ typedef union { */ #define x86_jump_code(inst,target) \ do { \ - int t = (target) - (inst) - 2; \ + int t = (unsigned char*)(target) - (inst) - 2; \ if (x86_is_imm8(t)) { \ x86_jump8 ((inst), t); \ } else { \ -- cgit v1.1 From bff8e602354a8d32dfaed336600b5f648af06e70 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Thu, 8 Nov 2001 21:38:32 +0000 Subject: 2001-11-07 Miguel de Icaza * x86/tramp.c: Include stdlib to kill warning. 2001-11-07 Miguel de Icaza * main.c (dis_property_methods): Added missing colon which avoided setting loc.t 2001-11-07 Miguel de Icaza * interp.c: Include stdlib to kill warning. (check_corlib): Adjust format encodings to remove warnings. 2001-11-07 Miguel de Icaza * reflection.c (build_compressed_metadata): Eliminates warnings and uses 64-bit clean code. * metadata.c (mono_type_hash): Change signature to eliminate warnings. (mono_type_equal): Change signature to eliminate warnings. 2001-11-07 Miguel de Icaza * monoburg.y: Include string.h, stdlib.h to kill warnings. * sample.brg: Include string.h to remove warnings. svn path=/trunk/mono/; revision=1298 --- ChangeLog | 3 +++ x86/tramp.c | 1 + 2 files changed, 4 insertions(+) diff --git a/ChangeLog b/ChangeLog index c6df60a..10f43c6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2001-11-07 Miguel de Icaza + + * x86/tramp.c: Include stdlib to kill warning. Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index 90bcb90..0f1e014 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -8,6 +8,7 @@ */ #include "config.h" +#include #include "x86-codegen.h" #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" -- cgit v1.1 From 041ab742894fbd6d90e2ffb3c6fddb60a869e952 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 9 Nov 2001 13:40:43 +0000 Subject: 2001-11-09 Dietmar Maurer * testjit.c (mono_analyze_stack): new BOX impl. * x86.brg: implemented INITOBJ * testjit.c (mono_analyze_stack): finished array support (mono_analyze_stack): reimplemented DUP instruction svn path=/trunk/mono/; revision=1308 --- x86/x86-codegen.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6a182d1..6f65e17 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.14 2001/11/07 06:33:48 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.15 2001/11/09 13:40:43 dietmar Exp $ */ #ifndef X86_H @@ -259,6 +259,10 @@ typedef union { *(inst)++ = 0xcc; \ } while (0) +#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0) +#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0) +#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0) + #define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) #define x86_rdtsc(inst) \ -- cgit v1.1 From af643d34335bfdc90a7455f99847e954456bb07d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 14 Nov 2001 15:18:56 +0000 Subject: Wed Nov 14 19:21:26 CET 2001 Paolo Molaro * x86/tramp.c: handle boolean as a return value. * x96/x86-codegen.c: x86_widen_memindex() added. Wed Nov 14 19:23:00 CET 2001 Paolo Molaro * interp.c: move the stack frame dumping code to a function so it can be called from the debugger. Fix virtual method lookup for interfaces. Throw exceptions instead of aborting in more places. Print also the message in an exception. Updates for field renames in corlib. Wed Nov 14 19:26:06 CET 2001 Paolo Molaro * class.h, class.c: add a max_interface_id to MonoClass. * icall.c: rename my_mono_new_object() to my_mono_new_mono_type() since it's used to do that. Added mono_type_type_from_obj(). Make GetType() return NULL instead of segfaulting if the type was not found. Handle simple arrays in assQualifiedName. * object.h: add a struct to represent an Exception. * reflection.c: output call convention in method signature. Add code to support P/Invoke methods and fixed offsets for fields. svn path=/trunk/mono/; revision=1352 --- ChangeLog | 6 ++++++ x86/test.c | 1 + x86/tramp.c | 3 +++ x86/x86-codegen.h | 12 +++++++++++- 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 10f43c6..e621380 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ + +Wed Nov 14 19:21:26 CET 2001 Paolo Molaro + + * x86/tramp.c: handle boolean as a return value. + * x96/x86-codegen.c: x86_widen_memindex() added. + 2001-11-07 Miguel de Icaza * x86/tramp.c: Include stdlib to kill warning. diff --git a/x86/test.c b/x86/test.c index 45fb578..a9695dc 100644 --- a/x86/test.c +++ b/x86/test.c @@ -126,6 +126,7 @@ int main() { x86_lea_mem (p, X86_EDX, mem_addr); /* test widen */ + x86_widen_memindex (p, X86_EDX, X86_ECX, 0, X86_EBX, 2, 1, 0); x86_cdq (p); x86_wait (p); diff --git a/x86/tramp.c b/x86/tramp.c index 0f1e014..8790219 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -406,6 +406,9 @@ mono_create_method_pointer (MonoMethod *method) switch (sig->ret->type) { case MONO_TYPE_VOID: break; + case MONO_TYPE_BOOLEAN: + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 1); + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6f65e17..2b9b1e1 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.15 2001/11/09 13:40:43 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.16 2001/11/14 15:18:55 lupus Exp $ */ #ifndef X86_H @@ -846,6 +846,16 @@ typedef union { x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ } while (0) +#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + #define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0) #define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0) -- cgit v1.1 From c4a26e54cfa29ea5279d1964ef4ea7f6176c0357 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 19 Nov 2001 06:52:53 +0000 Subject: Mon Nov 19 11:37:14 CET 2001 Paolo Molaro * class.c, class.h: add mono_install_trampoline() so that the runtime can register a function to create a trampoline: removes the ugly requirement that a runtime needed to export arch_create_jit_trampoline. * object.h, object.c: added mono_install_handler() so that the runtime can install an handler for exceptions generated in C code (with mono_raise_exception()). Added C struct for System.Delegate. * pedump.c: removed arch_create_jit_trampoline. * reflection.c: some cleanups to allow registering user strings and later getting a token for methodrefs and fieldrefs before the assembly is built. * row-indexes.h: updates and fixes from the new ECMA specs. Mon Nov 19 11:36:22 CET 2001 Paolo Molaro * jit.c: use mono_install_trampoline (), instead of exporting a function to a lower-level library. Mon Nov 19 11:33:00 CET 2001 Paolo Molaro * interp.c: start adding support for handling exceptions across managed/unmanaged boundaries. Cleanup Delegate method invocation. Pass the correct target object in Delegate::Invoke and use the correct 'this' pointer in ldvirtftn (bugs pointed out by Dietmar). Mon Nov 19 11:32:28 CET 2001 Paolo Molaro * main.c: remove arch_create_jit_trampoline(). svn path=/trunk/mono/; revision=1380 --- ChangeLog | 4 ++++ x86/tramp.c | 28 ++++++++++++++++++++++------ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index e621380..6eb7c17 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Thu Nov 15 17:41:01 CET 2001 Paolo Molaro + + * x86/tramp.c: handle enums with underlying type different from int32. + Wed Nov 14 19:21:26 CET 2001 Paolo Molaro * x86/tramp.c: handle boolean as a return value. diff --git a/x86/tramp.c b/x86/tramp.c index 8790219..51213fb 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -32,7 +32,7 @@ mono_create_trampoline (MonoMethod *method) MonoMethodSignature *sig; unsigned char *p, *code_buffer; guint32 local_size = 0, stack_size = 0, code_size = 30; - guint32 arg_pos; + guint32 arg_pos, simpletype; int i, stringp; sig = method->signature; @@ -48,7 +48,9 @@ mono_create_trampoline (MonoMethod *method) code_size += i < 10 ? 5 : 8; continue; } - switch (sig->params [i]->type) { + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: @@ -68,7 +70,11 @@ mono_create_trampoline (MonoMethod *method) code_size += i < 10 ? 
5 : 8; break; case MONO_TYPE_VALUETYPE: - if (!sig->params [i]->data.klass->enumtype && (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4)) + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); stack_size += 4; code_size += i < 10 ? 5 : 8; @@ -129,7 +135,9 @@ mono_create_trampoline (MonoMethod *method) x86_push_membase (p, X86_EDX, arg_pos); continue; } - switch (sig->params [i - 1]->type) { + simpletype = sig->params [i - 1]->type; +enum_marshal: + switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: @@ -154,7 +162,8 @@ mono_create_trampoline (MonoMethod *method) x86_push_regp (p, X86_EAX); } else { /* it's an enum value */ - x86_push_membase (p, X86_EDX, arg_pos); + simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; + goto enum_marshal; } break; case MONO_TYPE_R8: @@ -222,7 +231,9 @@ mono_create_trampoline (MonoMethod *method) x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); } else { - switch (sig->ret->type) { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: @@ -252,6 +263,11 @@ mono_create_trampoline (MonoMethod *method) x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); x86_mov_membase_reg (p, X86_ECX, 4, X86_EDX, 4); break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } case MONO_TYPE_VOID: break; default: -- cgit v1.1 From 719926a4c59c399767f10b9567859300a768b05a Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 27 Nov 2001 10:30:39 +0000 Subject: Tue Nov 27 15:24:07 CET 2001 Paolo Molaro * x96/x86-codegen.c: x86_lea_memindex() added. svn path=/trunk/mono/; revision=1447 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 6eb7c17..7a6d105 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Tue Nov 27 15:24:07 CET 2001 Paolo Molaro + + * x96/x86-codegen.c: x86_lea_memindex() added. + Thu Nov 15 17:41:01 CET 2001 Paolo Molaro * x86/tramp.c: handle enums with underlying type different from int32. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 2b9b1e1..b440fd3 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.16 2001/11/14 15:18:55 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.17 2001/11/27 10:30:39 lupus Exp $ */ #ifndef X86_H @@ -816,6 +816,12 @@ typedef union { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + #define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \ do { \ unsigned char op = 0xb6; \ -- cgit v1.1 From 2c1c4889b99aaf4be0b894ea24b4d92201cb282d Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Thu, 29 Nov 2001 19:32:19 +0000 Subject: added files for initial ppc support svn path=/trunk/mono/; revision=1476 --- ppc/.cvsignore | 6 + ppc/Makefile.am | 8 ++ ppc/ppc-codegen.h | 71 ++++++++++ ppc/test.c | 30 +++++ ppc/tramp.c | 397 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 512 insertions(+) create mode 100644 ppc/.cvsignore create mode 100644 ppc/Makefile.am create mode 100644 ppc/ppc-codegen.h create mode 100644 ppc/test.c create mode 100644 ppc/tramp.c diff --git a/ppc/.cvsignore b/ppc/.cvsignore new file mode 100644 index 0000000..e9793ab --- /dev/null +++ b/ppc/.cvsignore @@ -0,0 +1,6 @@ +Makefile +Makefile.in +.libs +.deps +*.la +*.lo diff --git a/ppc/Makefile.am b/ppc/Makefile.am new file mode 100644 index 0000000..ddfb109 --- /dev/null +++ b/ppc/Makefile.am @@ -0,0 +1,8 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-ppc.la + +libmonoarch_ppc_la_SOURCES = tramp.c ppc-codegen.h + +noinst_PROGRAMS = test \ No newline at end of file diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h new file mode 100644 index 0000000..b4915f2 --- /dev/null +++ b/ppc/ppc-codegen.h @@ -0,0 +1,71 @@ +/* + Copyright (C) 2001 Radek Doulik +*/ + +#ifndef PPC_H +#define PPC_H +#include +#include + +typedef enum { + r0 = 0, + r1, + r2, + r3, + r4, + r5, + r6, + r7, + r8, + r9, + r10, + r11, + r12, + r13, + r14, + r15, + r16, + r17, + r18, + r19, + r20, + r21, + r22, + r23, + r24, + r25, + r26, + r27, + r28, + r29, + r30, + r31 +} PPCIntRegister; + +typedef enum { + lr = 256, +} PPCSpecialRegister; + +#define emit32(c,x) *((guint32 *) c) = x; ((guint32 *)c)++ +#define emit32_bad(c,val) { \ +guint32 x = val; \ +c[0] = x & 0xff; x >>= 8; \ +c[1] = x & 0xff; x >>= 8; \ +c[2] = x & 0xff; x >>= 8; \ +c[3] = x; c += 4; } + +#define addi(c,D,A,d) emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define lwz(c,D,d,a) emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) +#define stw(c,S,d,a) emit32 (c, (36 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +#define stwu(c,s,d,a) emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) +#define or(c,a,s,b) emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) +#define mr(c,a,s) or (c, a, s, s) +#define mfspr(c,D,spr) emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1)) +#define mflr(c,D) mfspr (c, D, lr) +#define mtspr(c,spr,S) emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) +#define mtlr(c,S) mtspr (c, lr, S) + +#define blrl(c) emit32(c, 0x4e800021) +#define blr(c) emit32(c, 0x4e800020) + +#endif diff --git a/ppc/test.c b/ppc/test.c new file mode 100644 index 0000000..0b56490 --- /dev/null +++ b/ppc/test.c @@ -0,0 +1,30 @@ +#include "ppc-codegen.h" 
+#include + +/* don't run the resulting program, it will destroy your computer, + * just objdump -d it to inspect we generated the correct assembler. + */ + +int main() { + guint8 code [16000]; + guint8 *p = code; + guint8 *cp; + + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + + stwu (p, r1, -32, r1); + mflr (p, r0); + stw (p, r31, 28, r1); + or (p, r1, r2, r3); + mr (p, r31, r1); + lwz (p, r11, 0, r1); + mtlr (p, r0); + blr (p); + addi (p, r6, r6, 16); + + for (cp = code; cp < p; cp++) { + printf (".byte 0x%x\n", *cp); + } + + return 0; +} diff --git a/ppc/tramp.c b/ppc/tramp.c new file mode 100644 index 0000000..8dea860 --- /dev/null +++ b/ppc/tramp.c @@ -0,0 +1,397 @@ +/* + * Create trampolines to invoke arbitrary functions. + * + * Copyright (C) Radek Doulik + * + */ + +#include "config.h" +#include +#include "ppc-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" + +#ifdef NEED_MPROTECT +#include +#include /* for PAGESIZE */ +#ifndef PAGESIZE +#define PAGESIZE 4096 +#endif +#endif + +/* void +fake_func (gpointer (*callme)(), void *retval, void *this_obj, stackval *arguments) +{ + *(gpointer*)retval = (*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); +} */ + +#define MIN_CACHE_LINE 8 + +static void inline +flush_icache (guint8 *code, guint size) +{ + guint i; + guint8 *p; + + p = code; + for (i = 0; i < size; i += MIN_CACHE_LINE, p += MIN_CACHE_LINE) { + asm ("dcbst 0,%0;" : : "r"(p) : "memory"); + } + asm ("sync"); + p = code; + for (i = 0; i < size; i += MIN_CACHE_LINE, p += MIN_CACHE_LINE) { + asm ("icbi 0,%0; sync;" : : "r"(p) : "memory"); + } + asm ("sync"); + asm ("isync"); +} + +#define NOT_IMPLEMENTED \ + g_error ("FIXME: Not yet implemented. 
(trampoline)"); + +#define PROLOG_INS 8 +#define CALL_INS 2 +#define EPILOG_INS 6 +#define MINIMAL_STACK_SIZE 4 +#define FLOAT_REGS 8 +#define GENERAL_REGS 8 + +static void inline +add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) +{ + if (simple) { + if (*gr >= GENERAL_REGS) { + *stack_size += 4; + *code_size += 8; /* load from stack, save on stack */ + } else { + *code_size += 4; /* load from stack */ + } + } else { + if (*gr >= GENERAL_REGS - 1) { + *stack_size += 8 + (*stack_size % 8); + *code_size += 16; /* 2x load from stack, save to stack */ + } else { + *code_size += 16; /* 2x load from stack */ + } + (*gr) ++; + } + (*gr) ++; +} + +static void inline +calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) +{ + MonoMethodSignature *sig; + guint i, fr, gr; + guint32 simpletype; + + fr = gr = 0; + *stack_size = MINIMAL_STACK_SIZE*4; + *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS)*4; + + sig = method->signature; + if (sig->hasthis) { + add_general (&gr, stack_size, code_size, TRUE); + } + + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + g_error ("FIXME, trampoline: byref"); + } + simpletype = sig->params [i]->type; + enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + add_general (&gr, stack_size, code_size, TRUE); + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) + g_error ("can only marshal enums, not generic structures (size: %d)", + mono_class_value_size (sig->params [i]->data.klass, NULL)); + add_general (&gr, stack_size, code_size, TRUE); + break; + case MONO_TYPE_STRING: + NOT_IMPLEMENTED; + break; + case MONO_TYPE_I8: + add_general (&gr, stack_size, code_size, FALSE); + break; + case MONO_TYPE_R4: + case MONO_TYPE_R8: + NOT_IMPLEMENTED; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + if (sig->ret->byref) { + g_error ("trampoline, retval byref - TODO"); + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + *code_size += 8; + break; + case MONO_TYPE_STRING: + NOT_IMPLEMENTED; + break; + case MONO_TYPE_I8: + *code_size += 12; + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + + /* align stack size to 16 */ + printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size); + *stack_size = (*stack_size + 15) & ~15; + +} + +static inline guint8 * +emit_prolog (guint8 *p, guint stack_size) +{ + /* function prolog */ + stwu (p, r1, -stack_size, r1); /* sp <--- sp - 48, sp[0] <---- sp save sp, allocate stack */ + mflr (p, r0); /* r0 <--- LR */ + stw 
(p, r31, stack_size - 4, r1); /* sp[44] <--- r31 save r31 */ + stw (p, r0, stack_size + 4, r1); /* sp[52] <--- LR save return address in "callme" stack frame */ + mr (p, r31, r1); /* r31 <--- sp */ + + /* handle our parameters */ + mr (p, r14, r6); /* keep "arguments" in register */ + mr (p, r0, r3); /* keep "callme" in register */ + stw (p, r4, 8, r31); /* preserve "retval", sp[8] */ + + return p; +} + +#define SAVE_4_IN_GENERIC_REGISTER \ + if (gr < GENERAL_REGS) { \ + lwz (p, r3 + gr, i*16, r14); \ + gr ++; \ + } else { \ + NOT_IMPLEMENTED; \ + } + +inline static guint8* +emit_save_parameters (guint8 *p, MonoMethod *method) +{ + MonoMethodSignature *sig; + guint i, fr, gr; + guint32 simpletype; + + fr = gr = 0; + + sig = method->signature; + if (sig->hasthis) { + g_warning ("FIXME: trampoline, decide on MONO_CALL_THISCALL"); + mr (p, r3, r5); + gr ++; + } + + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + g_error ("FIXME, trampoline: byref"); + } + simpletype = sig->params [i]->type; + enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + SAVE_4_IN_GENERIC_REGISTER; + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) + g_error ("can only marshal enums, not generic structures (size: %d)", + mono_class_value_size (sig->params [i]->data.klass, NULL)); + SAVE_4_IN_GENERIC_REGISTER; + break; + case MONO_TYPE_STRING: + NOT_IMPLEMENTED; + break; + case MONO_TYPE_I8: + if (gr < 7) { + g_warning ("check endianess"); + lwz (p, r3 + gr, i*16, r14); + gr ++; + lwz (p, r3 + gr, i*17, r14); + gr ++; + } else { + NOT_IMPLEMENTED; + } + break; + case MONO_TYPE_R4: + case MONO_TYPE_R8: + NOT_IMPLEMENTED; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + return p; +} + +static inline guint8 * +alloc_code_memory (guint code_size) +{ + guint8 *p; + +#ifdef NEED_MPROTECT + p = g_malloc (code_size + PAGESIZE - 1); + + /* Align to a multiple of PAGESIZE, assumed to be a power of two */ + p = (char *)(((int) p + PAGESIZE-1) & ~(PAGESIZE-1)); +#else + p = g_malloc (code_size); +#endif + printf (" align: %p (%d)\n", p, (guint)p % 4); + + return p; +} + +static inline guint8 * +emit_call_and_store_retval (guint8 *p, MonoMethod *method) +{ + /* call "callme" */ + mtlr (p, r0); + blrl (p); + + /* get return value */ + lwz (p, r9, 8, r31); /* load "retval" address */ + stw (p, r3, 0, r9); /* save return value (r3) to "retval" */ + + return p; +} + +static inline guint8 * +emit_epilog (guint8 *p) +{ + /* function epilog */ + lwz (p, r11, 0, r1); /* r11 <--- sp[0] load backchain from caller's function */ + lwz (p, r0, 4, r11); /* r0 <--- r11[4] load return address */ + mtlr (p, r0); /* LR <--- r0 set return address */ + lwz (p, r31, -4, r11); /* r31 <--- r11[-4] restore r31 */ + mr (p, r1, r11); /* sp <--- r11 restore stack */ + blr (p); /* return */ + + return p; +} + +MonoPIFunc +mono_create_trampoline (MonoMethod *method) +{ + guint8 *p, *code_buffer; + guint stack_size, code_size; + + printf ("\nPInvoke [start emiting]\n"); + + calculate_sizes (method, 
&stack_size, &code_size); + + p = code_buffer = alloc_code_memory (code_size); + p = emit_prolog (p, stack_size); + p = emit_save_parameters (p, method); + p = emit_call_and_store_retval (p, method); + p = emit_epilog (p); + + /* { + guchar *cp; + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + for (cp = code_buffer; cp < p; cp++) { + printf (".byte 0x%x\n", *cp); + } + } */ + +#ifdef NEED_MPROTECT + if (mprotect (code_buffer, 1024, PROT_READ | PROT_WRITE | PROT_EXEC)) { + g_error ("Cannot mprotect trampoline\n"); + } +#endif + + printf ("emited code size: %d\n", p - code_buffer); + flush_icache (code_buffer, p - code_buffer); + + printf ("PInvoke [end emiting]\n"); + + return (MonoPIFunc) code_buffer; + /* return fake_func; */ +} + + +#define MINV_POS (- sizeof (MonoInvocation)) +#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) +#define OBJ_POS 8 +#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) + +/* + * Returns a pointer to a native function that can be used to + * call the specified method. + * The function created will receive the arguments according + * to the call convention specified in the method. + * This function works by creating a MonoInvocation structure, + * filling the fields in and calling ves_exec_method on it. + * Still need to figure out how to handle the exception stuff + * across the managed/unmanaged boundary. + */ +void * +mono_create_method_pointer (MonoMethod *method) +{ + return NULL; +} + + -- cgit v1.1 From c4f49a88d52479062bd8b95669cb90c1b86242d0 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Thu, 29 Nov 2001 19:32:48 +0000 Subject: added test svn path=/trunk/mono/; revision=1477 --- ppc/.cvsignore | 1 + 1 file changed, 1 insertion(+) diff --git a/ppc/.cvsignore b/ppc/.cvsignore index e9793ab..3c6240d 100644 --- a/ppc/.cvsignore +++ b/ppc/.cvsignore @@ -4,3 +4,4 @@ Makefile.in .deps *.la *.lo +test \ No newline at end of file -- cgit v1.1 From 0a65eb2cf0b69f68849e7196b6e00133b3ecf3fc Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Thu, 29 Nov 2001 20:19:00 +0000 Subject: 2001-11-29 Radek Doulik * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS generate libmonoarch for ppc svn path=/trunk/mono/; revision=1478 --- ChangeLog | 4 ++++ Makefile.am | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 7a6d105..4244698 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2001-11-29 Radek Doulik + + * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS + generate libmonoarch for ppc Tue Nov 27 15:24:07 CET 2001 Paolo Molaro diff --git a/Makefile.am b/Makefile.am index c9e91be..40b50bf 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 +DIST_SUBDIRS = x86 ppc INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) @@ -10,6 +10,9 @@ libmonoarch_la_SOURCES = unknown.c if X86 libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la endif +if POWERPC +libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la +endif EXTRA_DIST = ChangeLog -- cgit v1.1 From 813f9d5a9dcbe48c711bbb8bacc876e976ce0aea Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Thu, 29 Nov 2001 21:23:53 +0000 Subject: 2001-11-29 Radek Doulik * ppc/tramp.c: use r12 which is volatile instead of non-volatile r14 to avoid saving svn path=/trunk/mono/; revision=1482 --- ChangeLog | 3 +++ ppc/tramp.c | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4244698..b78682c 
100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,8 @@ 2001-11-29 Radek Doulik + * ppc/tramp.c: use r12 which is volatile instead of non-volatile + r14 to avoid saving + * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS generate libmonoarch for ppc diff --git a/ppc/tramp.c b/ppc/tramp.c index 8dea860..ce7b53b 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -197,7 +197,7 @@ emit_prolog (guint8 *p, guint stack_size) mr (p, r31, r1); /* r31 <--- sp */ /* handle our parameters */ - mr (p, r14, r6); /* keep "arguments" in register */ + mr (p, r12, r6); /* keep "arguments" in register */ mr (p, r0, r3); /* keep "callme" in register */ stw (p, r4, 8, r31); /* preserve "retval", sp[8] */ @@ -206,7 +206,7 @@ emit_prolog (guint8 *p, guint stack_size) #define SAVE_4_IN_GENERIC_REGISTER \ if (gr < GENERAL_REGS) { \ - lwz (p, r3 + gr, i*16, r14); \ + lwz (p, r3 + gr, i*16, r12); \ gr ++; \ } else { \ NOT_IMPLEMENTED; \ @@ -267,9 +267,9 @@ emit_save_parameters (guint8 *p, MonoMethod *method) case MONO_TYPE_I8: if (gr < 7) { g_warning ("check endianess"); - lwz (p, r3 + gr, i*16, r14); + lwz (p, r3 + gr, i*16, r12); gr ++; - lwz (p, r3 + gr, i*17, r14); + lwz (p, r3 + gr, i*17, r12); gr ++; } else { NOT_IMPLEMENTED; -- cgit v1.1 From 35430229b14448182d84a7f9348995019251fb28 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 13 Dec 2001 11:03:21 +0000 Subject: Thu Dec 13 15:56:53 CET 2001 Paolo Molaro * x86/x86-codegen.h: x86_mov_memindex_imm() added. svn path=/trunk/mono/; revision=1565 --- ChangeLog | 5 +++++ x86/tramp.c | 2 ++ x86/x86-codegen.h | 20 +++++++++++++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index b78682c..9e48beb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Thu Dec 13 15:56:53 CET 2001 Paolo Molaro + + * x86/x86-codegen.h: x86_mov_memindex_imm() added. + 2001-11-29 Radek Doulik * ppc/tramp.c: use r12 which is volatile instead of non-volatile diff --git a/x86/tramp.c b/x86/tramp.c index 51213fb..ef34118 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -246,6 +246,8 @@ enum_retvalue: case MONO_TYPE_U: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: /* this is going to cause large pains... */ x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index b440fd3..08421ea 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.17 2001/11/27 10:30:39 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.18 2001/12/13 11:03:21 lupus Exp $ */ #ifndef X86_H @@ -804,6 +804,24 @@ typedef union { } \ } while (0) +#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 2) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + #define x86_lea_mem(inst,reg,mem) \ do { \ *(inst)++ = (unsigned char)0x8d; \ -- cgit v1.1 From faaadc7132a2cdd8c13adf7fbb79d32461759493 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 17 Dec 2001 06:50:02 +0000 Subject: 2001-12-16 Dietmar Maurer * emit-x86.c (arch_handle_exception): new code to handle exceptions inside unmanaged code. * x86.brg: impl. SAVE_LMF, RESTORE_LMF, pass implicit valuetype address as first argument. * x86.brg: pass exceptions on the stack * jit.h (ISSTRUCT): new macro to check for real value types (return false for enum types). * unicode.c (_wapi_unicode_to_utf8): byteswap UTF16 strings before passing them to iconv * file-io.c: raise exceptions if handle is invalid. svn path=/trunk/mono/; revision=1603 --- x86/x86-codegen.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h index 08421ea..c1cced3 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.18 2001/12/13 11:03:21 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.19 2001/12/17 06:50:02 dietmar Exp $ */ #ifndef X86_H @@ -1089,6 +1089,12 @@ typedef union { x86_membase_emit ((inst), 6, (basereg), (disp)); \ } while (0) +#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \ + } while (0) + #define x86_push_imm(inst,imm) \ do { \ *(inst)++ = (unsigned char)0x68; \ -- cgit v1.1 From 054ebda213a85e3a8a1770ec5e63831e3a0f06ba Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 20 Dec 2001 15:20:42 +0000 Subject: Thu Dec 20 20:13:07 CET 2001 Paolo Molaro * x86/tramp.c: fix create_method_pointer() to pass the arguments correctly and add check for overflow. svn path=/trunk/mono/; revision=1656 --- ChangeLog | 5 +++++ x86/tramp.c | 3 +++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog index 9e48beb..ef76e0d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Thu Dec 20 20:13:07 CET 2001 Paolo Molaro + + * x86/tramp.c: fix create_method_pointer() to pass the arguments + correctly and add check for overflow. + Thu Dec 13 15:56:53 CET 2001 Paolo Molaro * x86/x86-codegen.h: x86_mov_memindex_imm() added.
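The overflow check named in this commit message is the g_assert added to create_method_pointer () in the x86/tramp.c hunk that follows: code is emitted into a fixed-size scratch buffer and the emitted length is only validated after the fact. A minimal sketch of the pattern, assuming a 512-byte buffer as asserted in the hunk (the emitter calls here are illustrative, not the actual create_method_pointer () body):

	/* emit into a scratch buffer; every x86_* macro advances p as a side effect */
	unsigned char code_buffer [512];
	unsigned char *p = code_buffer;

	x86_push_reg (p, X86_EBP);                 /* standard frame setup */
	x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4);
	/* ... emit the body of the method pointer thunk ... */
	x86_leave (p);
	x86_ret (p);

	/* the check added by this commit: fail loudly instead of
	 * silently running past the end of the buffer */
	g_assert (p - code_buffer < 512);

Note that the assert runs after emission, so it catches a miscalculated size bound during development; it does not prevent the overwrite itself.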
diff --git a/x86/tramp.c b/x86/tramp.c index ef34118..aa2155d 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -375,6 +375,8 @@ mono_create_method_pointer (MonoMethod *method) * arg_pos is the offset from EBP to the incoming arg on the stack. * We just call stackval_from_data to handle all the (nasty) issues.... */ + x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)), X86_EAX, 4); for (i = 0; i < sig->param_count; ++i) { x86_mov_reg_imm (p, X86_ECX, stackval_from_data); x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); @@ -455,6 +457,7 @@ mono_create_method_pointer (MonoMethod *method) x86_leave (p); x86_ret (p); + g_assert (p - code_buffer < 512); return g_memdup (code_buffer, p - code_buffer); } -- cgit v1.1 From 0635ffef0b38bcf88cd3320939c1d96bf8bb8c0e Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Thu, 3 Jan 2002 20:13:47 +0000 Subject: Fix build for new automakes, seems to work svn path=/trunk/mono/; revision=1795 --- Makefile.am | 5 ----- 1 file changed, 5 deletions(-) diff --git a/Makefile.am b/Makefile.am index 40b50bf..36a9c0e 100644 --- a/Makefile.am +++ b/Makefile.am @@ -7,12 +7,7 @@ noinst_LTLIBRARIES = libmonoarch.la libmonoarch_la_SOURCES = unknown.c -if X86 libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la -endif -if POWERPC -libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la -endif EXTRA_DIST = ChangeLog -- cgit v1.1 From ba9f9e77bf38e3bb4b1a888d39c7b0aab8ae09bf Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 5 Jan 2002 11:15:42 +0000 Subject: Sat Jan 5 15:48:04 CET 2002 Paolo Molaro * icall.c: hack to make IsSubType work for TypeBuilders. * reflection.c: emit constructors before methods. Retrieve param names in mono_param_get_objects(). Sat Jan 5 15:45:14 CET 2002 Paolo Molaro * interp.c: allow classname:method name in --debug argument. Fix box opcode for valuetypes. Fix a few opcode to take a 16 bit index instead of 32 (stloc, ldloc, starg, etc.). Sat Jan 5 15:51:06 CET 2002 Paolo Molaro * x86/tramp.c: handle short integer return types. svn path=/trunk/mono/; revision=1852 --- ChangeLog | 4 ++++ x86/tramp.c | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index ef76e0d..f5ac2cb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Sat Jan 5 15:51:06 CET 2002 Paolo Molaro + + * x86/tramp.c: handle short integer return types. 
+ Thu Dec 20 20:13:07 CET 2001 Paolo Molaro * x86/tramp.c: fix create_method_pointer() to pass the arguments diff --git a/x86/tramp.c b/x86/tramp.c index aa2155d..5842f6f 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -240,6 +240,12 @@ enum_retvalue: x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 1); break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 2); + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: -- cgit v1.1 From 66990d65e3ac907fe24cc5411591759ce60472b0 Mon Sep 17 00:00:00 2001 From: Matt Kimball Date: Wed, 9 Jan 2002 01:49:12 +0000 Subject: Tue Jan 8 22:38:41 MST 2002 Matt Kimball * x86/tramp.c: handle strings returned from functions in external libraries by converting to a Mono string object after the pinvoke'd function returns svn path=/trunk/mono/; revision=1923 --- ChangeLog | 7 ++++++- x86/tramp.c | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index f5ac2cb..58ad407 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ - +Tue Jan 8 22:38:41 MST 2002 Matt Kimball + + * x86/tramp.c: handle strings returned from functions in external + libraries by converting to a Mono string object after the pinvoke'd + function returns + Sat Jan 5 15:51:06 CET 2002 Paolo Molaro * x86/tramp.c: handle short integer return types. diff --git a/x86/tramp.c b/x86/tramp.c index 5842f6f..afa3a79 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -254,7 +254,21 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: /* this is going to cause large pains... */ + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + case MONO_TYPE_STRING: + if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + } + + x86_push_reg (p, X86_EAX); + x86_mov_reg_imm (p, X86_EDX, mono_string_new); + x86_call_reg (p, X86_EDX); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; -- cgit v1.1 From a18abcd00665e9bc660b90cf4c0bdf86456067af Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 10 Jan 2002 16:13:26 +0000 Subject: Thu Jan 10 19:36:27 CET 2002 Paolo Molaro * class.c: fix mono_class_from_mono_type () for szarray types. Remove unused cache check in mono_class_from_type_spec(). * icall.c: *type_from_name () functions handle simple arrays and byref. * reflection.c: handle byref and szarray types. Handle methods without body (gets P/Invoke compilation working). Handle types and fields in get_token (). * reflection.h: add rank to MonoTypeInfo. Thu Jan 10 20:59:59 CET 2002 Paolo Molaro * interp.c, interp.h: add a flag to mono_create_trampoline () to handle runtime methods. Thu Jan 10 21:01:08 CET 2002 Paolo Molaro * x86/tramp.c: mono_create_trampoline (): the runtime argument is needed to handle correctly delegates, the previous change in handling the string return type broke them. 
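To make the fix concrete: mono_create_trampoline () now takes a runtime flag, and the MonoString conversion added by the previous commit is skipped when the callee is an internal call or a runtime method such as a delegate invoke. A sketch of the resulting string-return logic, condensed from the x86/tramp.c hunks that follow (RETVAL_POS and the emitter macros are the ones already used in tramp.c):

	case MONO_TYPE_STRING:
		if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) {
			/* already a MonoString*: store EAX straight through "retval" */
			x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4);
			x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4);
			break;
		}
		/* raw char* from a real P/Invoke call: box it with mono_string_new () first */
		x86_push_reg (p, X86_EAX);
		x86_mov_reg_imm (p, X86_EDX, mono_string_new);
		x86_call_reg (p, X86_EDX);
		x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4);
		x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4);
		x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4);
		break;

Without the runtime test, a delegate returning a string would have its already-managed return value pushed through mono_string_new () a second time, which is exactly the breakage described above.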
svn path=/trunk/mono/; revision=1950 --- ChangeLog | 7 +++++++ x86/tramp.c | 6 +++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/ChangeLog index 58ad407..acc1c47 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,10 @@ + +Thu Jan 10 21:01:08 CET 2002 Paolo Molaro + + * x86/tramp.c: mono_create_trampoline (): the runtime argument is + needed to handle correctly delegates, the previous change in handling + the string return type broke them. + Tue Jan 8 22:38:41 MST 2002 Matt Kimball * x86/tramp.c: handle strings returned from functions in external diff --git a/x86/tramp.c index afa3a79..0b45b6f 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -27,7 +27,7 @@ #define ARG_SIZE sizeof (stackval) MonoPIFunc -mono_create_trampoline (MonoMethod *method) +mono_create_trampoline (MonoMethod *method, int runtime) { MonoMethodSignature *sig; unsigned char *p, *code_buffer; @@ -176,7 +176,7 @@ enum_marshal: * If it is an internalcall we assume it's the object we want. * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. */ - if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { x86_push_membase (p, X86_EDX, arg_pos); break; } @@ -258,7 +258,7 @@ enum_retvalue: case MONO_TYPE_STRING: - if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) { + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; -- cgit v1.1 From b5472227702fc528149111f0c4406c9dadb9a9e0 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 14 Jan 2002 07:00:24 +0000 Subject: Mon Jan 14 11:50:16 CET 2002 Paolo Molaro * x86/x86-codegen.h: added overflow condition code and some aliases for the other ccs. svn path=/trunk/mono/; revision=1968 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 28 +++++++++++++++++----------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/ChangeLog index acc1c47..83b4f34 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Jan 14 11:50:16 CET 2002 Paolo Molaro + + * x86/x86-codegen.h: added overflow condition code and some aliases + for the other ccs. + Thu Jan 10 21:01:08 CET 2002 Paolo Molaro * x86/tramp.c: mono_create_trampoline (): the runtime argument is diff --git a/x86/x86-codegen.h index c1cced3..affd563 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc.
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.19 2001/12/17 06:50:02 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.20 2002/01/14 07:00:24 lupus Exp $ */ #ifndef X86_H @@ -64,16 +64,18 @@ typedef enum { // integer conditions codes */ typedef enum { - X86_CC_EQ = 0, - X86_CC_NE, - X86_CC_LT, - X86_CC_LE, - X86_CC_GT, - X86_CC_GE, - X86_CC_LZ, - X86_CC_GEZ, - X86_CC_P, - X86_CC_NP, + X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0, + X86_CC_NE = 1, X86_CC_NZ = 1, + X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2, + X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3, + X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4, + X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5, + X86_CC_LZ = 6, X86_CC_S = 6, + X86_CC_GEZ = 7, X86_CC_NS = 7, + X86_CC_P = 8, X86_CC_PE = 8, + X86_CC_NP = 9, X86_CC_PO = 9, + X86_CC_O = 10, + X86_CC_NO = 11, X86_NCC } X86_CC; /* @@ -106,6 +108,8 @@ x86_cc_unsigned_map [X86_NCC] = { 0x79, /* gez */ 0x7a, /* p */ 0x7b, /* np */ + 0x70, /* o */ + 0x71, /* no */ }; static const unsigned char @@ -120,6 +124,8 @@ x86_cc_signed_map [X86_NCC] = { 0x79, /* gez */ 0x7a, /* p */ 0x7b, /* np */ + 0x70, /* o */ + 0x71, /* no */ }; typedef union { -- cgit v1.1 From 5291c24b937d193ef9861c87421bab87e0fcc4da Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Mon, 21 Jan 2002 20:06:20 +0000 Subject: ppc changes svn path=/trunk/mono/; revision=2090 --- ppc/ppc-codegen.h | 144 ++++++++++++++++++---------- ppc/test.c | 26 ++--- ppc/tramp.c | 276 +++++++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 325 insertions(+), 121 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index b4915f2..624f088 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -8,64 +8,104 @@ #include typedef enum { - r0 = 0, - r1, - r2, - r3, - r4, - r5, - r6, - r7, - r8, - r9, - r10, - r11, - r12, - r13, - r14, - r15, - r16, - r17, - r18, - r19, - r20, - r21, - r22, - r23, - r24, - r25, - r26, - r27, - r28, - r29, - r30, - r31 + ppc_r0 = 0, + ppc_r1, + ppc_r2, + ppc_r3, + ppc_r4, + ppc_r5, + ppc_r6, + ppc_r7, + ppc_r8, + ppc_r9, + ppc_r10, + ppc_r11, + ppc_r12, + ppc_r13, + ppc_r14, + ppc_r15, + ppc_r16, + ppc_r17, + ppc_r18, + ppc_r19, + ppc_r20, + ppc_r21, + ppc_r22, + ppc_r23, + ppc_r24, + ppc_r25, + ppc_r26, + ppc_r27, + ppc_r28, + ppc_r29, + ppc_r30, + ppc_r31 } PPCIntRegister; typedef enum { - lr = 256, + ppc_f0 = 0, + ppc_f1, + ppc_f2, + ppc_f3, + ppc_f4, + ppc_f5, + ppc_f6, + ppc_f7, + ppc_f8, + ppc_f9, + ppc_f10, + ppc_f11, + ppc_f12, + ppc_f13, + ppc_f14, + ppc_f15, + ppc_f16, + ppc_f17, + ppc_f18, + ppc_f19, + ppc_f20, + ppc_f21, + ppc_f22, + ppc_f23, + ppc_f24, + ppc_f25, + ppc_f26, + ppc_f27, + ppc_f28, + ppc_f29, + ppc_f30, + ppc_f31 +} PPCFloatRegister; + +typedef enum { + ppc_lr = 256, } PPCSpecialRegister; -#define emit32(c,x) *((guint32 *) c) = x; ((guint32 *)c)++ -#define emit32_bad(c,val) { \ -guint32 x = val; \ -c[0] = x & 0xff; x >>= 8; \ -c[1] = x & 0xff; x >>= 8; \ -c[2] = x & 0xff; x >>= 8; \ -c[3] = x; c += 4; } +#define ppc_emit32(c,x) *((guint32 *) c) = x; ((guint32 *)c)++ + +#define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_addis(c,D,A,d) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_li(c,D,v) ppc_addi (c, D, 0, v); +#define ppc_lis(c,D,v) ppc_addis (c, D, 0, v); +#define ppc_lwz(c,D,d,a) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) +#define 
ppc_stw(c,S,d,a) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_stb(c,S,d,a) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_stwu(c,s,d,a) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) +#define ppc_ori(c,S,A,u) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(u)) +#define ppc_mr(c,a,s) ppc_or (c, a, s, s) +#define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1)) +#define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr) +#define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) +#define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S) + +#define ppc_blrl(c) ppc_emit32 (c, 0x4e800021) +#define ppc_blr(c) ppc_emit32 (c, 0x4e800020) -#define addi(c,D,A,d) emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) -#define lwz(c,D,d,a) emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) -#define stw(c,S,d,a) emit32 (c, (36 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) -#define stwu(c,s,d,a) emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) -#define or(c,a,s,b) emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) -#define mr(c,a,s) or (c, a, s, s) -#define mfspr(c,D,spr) emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1)) -#define mflr(c,D) mfspr (c, D, lr) -#define mtspr(c,spr,S) emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) -#define mtlr(c,S) mtspr (c, lr, S) +#define ppc_lfs(c,D,d,A) ppc_emit32 (c, (48 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_lfd(c,D,d,A) ppc_emit32 (c, (50 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_stfs(c,S,d,a) ppc_emit32 (c, (52 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_stfd(c,S,d,a) ppc_emit32 (c, (54 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) -#define blrl(c) emit32(c, 0x4e800021) -#define blr(c) emit32(c, 0x4e800020) #endif diff --git a/ppc/test.c b/ppc/test.c index 0b56490..f80e5bb 100644 --- a/ppc/test.c +++ b/ppc/test.c @@ -12,19 +12,19 @@ int main() { printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); - stwu (p, r1, -32, r1); - mflr (p, r0); - stw (p, r31, 28, r1); - or (p, r1, r2, r3); - mr (p, r31, r1); - lwz (p, r11, 0, r1); - mtlr (p, r0); - blr (p); - addi (p, r6, r6, 16); - + ppc_stwu (p, ppc_r1, -32, ppc_r1); + ppc_mflr (p, ppc_r0); + ppc_stw (p, ppc_r31, 28, ppc_r1); + ppc_or (p, ppc_r1, ppc_r2, ppc_r3); + ppc_mr (p, ppc_r31, ppc_r1); + ppc_lwz (p, ppc_r11, 0, ppc_r1); + ppc_mtlr (p, ppc_r0); + ppc_blr (p); + ppc_addi (p, ppc_r6, ppc_r6, 16); + for (cp = code; cp < p; cp++) { printf (".byte 0x%x\n", *cp); - } - - return 0; + } + + return 0; } diff --git a/ppc/tramp.c b/ppc/tramp.c index ce7b53b..8d7efae 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -21,9 +21,14 @@ #endif /* void -fake_func (gpointer (*callme)(), void *retval, void *this_obj, stackval *arguments) +fake_func (gdouble (*callme)(), stackval *retval, void *this_obj, stackval *arguments) { - *(gpointer*)retval = (*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); + guint32 i = 0xc002becd; + + callme = (gpointer) 0x100fabcd; + + *(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); + *(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); } */ #define 
MIN_CACHE_LINE 8 @@ -47,8 +52,8 @@ flush_icache (guint8 *code, guint size) asm ("isync"); } -#define NOT_IMPLEMENTED \ - g_error ("FIXME: Not yet implemented. (trampoline)"); +#define NOT_IMPLEMENTED(x) \ + g_error ("FIXME: %s is not yet implemented. (trampoline)", x); #define PROLOG_INS 8 #define CALL_INS 2 @@ -80,7 +85,7 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } static void inline -calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) +calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint *strings, gint runtime) { MonoMethodSignature *sig; guint i, fr, gr; @@ -89,6 +94,7 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) fr = gr = 0; *stack_size = MINIMAL_STACK_SIZE*4; *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS)*4; + *strings = 0; sig = method->signature; if (sig->hasthis) { @@ -97,7 +103,8 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { - g_error ("FIXME, trampoline: byref"); + add_general (&gr, stack_size, code_size, TRUE); + continue; } simpletype = sig->params [i]->type; enum_calc_size: @@ -127,16 +134,28 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); add_general (&gr, stack_size, code_size, TRUE); + *code_size += 4; break; case MONO_TYPE_STRING: - NOT_IMPLEMENTED; + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + add_general (&gr, stack_size, code_size, TRUE); + break; + } + (*strings) ++; + *code_size += 12*4; + *stack_size += 4; break; case MONO_TYPE_I8: add_general (&gr, stack_size, code_size, FALSE); break; case MONO_TYPE_R4: case MONO_TYPE_R8: - NOT_IMPLEMENTED; + if (fr < 7) { + *code_size += 4; + fr ++; + } else { + NOT_IMPLEMENTED ("R8 arg"); + } break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); @@ -144,7 +163,7 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size) } if (sig->ret->byref) { - g_error ("trampoline, retval byref - TODO"); + *code_size += 8; } else { simpletype = sig->ret->type; enum_retvalue: @@ -160,10 +179,10 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_R4: case MONO_TYPE_R8: - *code_size += 8; - break; + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: - NOT_IMPLEMENTED; + *code_size += 8; break; case MONO_TYPE_I8: *code_size += 12; @@ -173,6 +192,8 @@ enum_retvalue: simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } + NOT_IMPLEMENTED ("valuetype"); + break; case MONO_TYPE_VOID: break; default: @@ -180,6 +201,15 @@ enum_retvalue: } } + if (*strings) { + /* space to keep parameters and prepared strings */ + *stack_size += 8; + *code_size += 16; + if (sig->hasthis) { + *stack_size += 4; + *code_size += 12; + } + } /* align stack size to 16 */ printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size); *stack_size = (*stack_size + 15) & ~15; @@ -187,50 +217,80 @@ enum_retvalue: } static inline guint8 * -emit_prolog (guint8 *p, guint stack_size) +emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) { /* function prolog */ - stwu (p, r1, -stack_size, r1); /* sp <--- sp - 48, sp[0] <---- sp save sp, allocate stack */ - mflr (p, r0); /* r0 <--- LR */ - stw (p, r31, stack_size - 4, r1); /* sp[44] 
<--- r31 save r31 */ - stw (p, r0, stack_size + 4, r1); /* sp[52] <--- LR save return address in "callme" stack frame */ - mr (p, r31, r1); /* r31 <--- sp */ + ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ + ppc_mflr (p, ppc_r0); /* r0 <--- LR */ + ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ + ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ + ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ /* handle our parameters */ - mr (p, r12, r6); /* keep "arguments" in register */ - mr (p, r0, r3); /* keep "callme" in register */ - stw (p, r4, 8, r31); /* preserve "retval", sp[8] */ + if (strings) { + ppc_stw (p, ppc_r30, 16, ppc_r1); + ppc_stw (p, ppc_r29, 20, ppc_r1); + if (method->signature->hasthis) { + ppc_stw (p, ppc_r28, 24, ppc_r1); + } + ppc_mr (p, ppc_r30, ppc_r6); /* args */ + ppc_mr (p, ppc_r29, ppc_r3); /* callme */ + if (method->signature->hasthis) { + ppc_mr (p, ppc_r28, ppc_r5); /* this */ + } + } else { + ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ + ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ + } + ppc_stw (p, ppc_r4, 8, ppc_r31); /* preserve "retval", sp[+8] */ return p; } +#define ARG_BASE strings ? ppc_r30 : ppc_r12 #define SAVE_4_IN_GENERIC_REGISTER \ if (gr < GENERAL_REGS) { \ - lwz (p, r3 + gr, i*16, r12); \ + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ gr ++; \ } else { \ - NOT_IMPLEMENTED; \ + NOT_IMPLEMENTED("save on stack"); \ } inline static guint8* -emit_save_parameters (guint8 *p, MonoMethod *method) +emit_save_parameters (guint8 *p, MonoMethod *method, guint strings, gint runtime) { MonoMethodSignature *sig; - guint i, fr, gr; + guint i, fr, gr, act_strs; guint32 simpletype; fr = gr = 0; - + act_strs = 0; sig = method->signature; + + if (strings) { + for (i = 0; i < sig->param_count; ++i) { + if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING) { + ppc_lis (p, ppc_r0, (guint32) mono_string_to_utf8 >> 16); + ppc_lwz (p, ppc_r3, i*16, ppc_r30); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_to_utf8 & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + ppc_stw (p, ppc_r3, 24 + act_strs, ppc_r31); + act_strs += 4; + } + } + } + if (sig->hasthis) { - g_warning ("FIXME: trampoline, decide on MONO_CALL_THISCALL"); - mr (p, r3, r5); + ppc_mr (p, ppc_r3, ppc_r5); gr ++; } + act_strs = 0; for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { - g_error ("FIXME, trampoline: byref"); + SAVE_4_IN_GENERIC_REGISTER; + continue; } simpletype = sig->params [i]->type; enum_calc_size: @@ -259,25 +319,52 @@ emit_save_parameters (guint8 *p, MonoMethod *method) if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); - SAVE_4_IN_GENERIC_REGISTER; + if (gr < GENERAL_REGS) { + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); + gr ++; + } else { + NOT_IMPLEMENTED ("save value type on stack"); + } break; case MONO_TYPE_STRING: - NOT_IMPLEMENTED; + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + SAVE_4_IN_GENERIC_REGISTER; + } else { + if (gr < 8) { + ppc_lwz (p, ppc_r3 + gr, 24 + act_strs * 4, ppc_r31); + gr ++; + act_strs ++; + } else + NOT_IMPLEMENTED ("string on stack"); + } break; case MONO_TYPE_I8: if (gr < 7) { g_warning ("check endianess"); - lwz (p, r3 + gr, i*16, 
r12); + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); gr ++; - lwz (p, r3 + gr, i*17, r12); + ppc_lwz (p, ppc_r3 + gr, i*17, ARG_BASE); gr ++; } else { - NOT_IMPLEMENTED; + NOT_IMPLEMENTED ("i8 on stack"); } break; case MONO_TYPE_R4: + if (fr < 7) { + ppc_lfs (p, ppc_f1 + fr, i*16, ARG_BASE); + fr ++; + } else { + NOT_IMPLEMENTED ("r4 on stack"); + } + break; case MONO_TYPE_R8: - NOT_IMPLEMENTED; + if (fr < 7) { + ppc_lfd (p, ppc_f1 + fr, i*16, ARG_BASE); + fr ++; + } else { + NOT_IMPLEMENTED ("r8 on stack"); + } break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); @@ -306,48 +393,125 @@ alloc_code_memory (guint code_size) } static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethod *method) +emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint strings) { + MonoMethodSignature *sig = method->signature; + guint32 simpletype; + /* call "callme" */ - mtlr (p, r0); - blrl (p); + ppc_mtlr (p, strings ? ppc_r29 : ppc_r0); + ppc_blrl (p); /* get return value */ - lwz (p, r9, 8, r31); /* load "retval" address */ - stw (p, r3, 0, r9); /* save return value (r3) to "retval" */ + if (sig->ret->byref) { + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stb (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; + case MONO_TYPE_R4: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stfs (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ + break; + case MONO_TYPE_R8: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stfd (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ + break; + case MONO_TYPE_I8: + g_warning ("check endianess"); + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + ppc_stw (p, ppc_r4, 4, ppc_r9); /* save return value (r3) to "retval" */ + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + NOT_IMPLEMENTED ("retval valuetype"); + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } return p; } static inline guint8 * -emit_epilog (guint8 *p) +emit_epilog (guint8 *p, MonoMethod *method, guint strings) { + if (strings) { + MonoMethodSignature *sig = method->signature; + guint i, act_strs; + + /* free allocated memory */ + act_strs = 0; + for (i = 0; i < sig->param_count; ++i) { + if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING) { + ppc_lis (p, ppc_r0, (guint32) g_free >> 16); + ppc_lwz (p, ppc_r3, 24 + act_strs, ppc_r31); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) g_free & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + act_strs += 4; + } + } + + /* restore volatile registers */ + ppc_lwz (p, ppc_r30, 16, ppc_r1); + ppc_lwz (p, 
ppc_r29, 20, ppc_r1); + if (method->signature->hasthis) { + ppc_lwz (p, ppc_r28, 24, ppc_r1); + } + } + /* function epilog */ - lwz (p, r11, 0, r1); /* r11 <--- sp[0] load backchain from caller's function */ - lwz (p, r0, 4, r11); /* r0 <--- r11[4] load return address */ - mtlr (p, r0); /* LR <--- r0 set return address */ - lwz (p, r31, -4, r11); /* r31 <--- r11[-4] restore r31 */ - mr (p, r1, r11); /* sp <--- r11 restore stack */ - blr (p); /* return */ + ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ + ppc_lwz (p, ppc_r0, 4, ppc_r11); /* r0 <--- r11[4] load return address */ + ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */ + ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */ + ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ + ppc_blr (p); /* return */ return p; } MonoPIFunc -mono_create_trampoline (MonoMethod *method) +mono_create_trampoline (MonoMethod *method, int runtime) { guint8 *p, *code_buffer; - guint stack_size, code_size; - - printf ("\nPInvoke [start emiting]\n"); + guint stack_size, code_size, strings; - calculate_sizes (method, &stack_size, &code_size); + printf ("\nPInvoke [start emiting] %s\n", method->name); + calculate_sizes (method, &stack_size, &code_size, &strings, runtime); p = code_buffer = alloc_code_memory (code_size); - p = emit_prolog (p, stack_size); - p = emit_save_parameters (p, method); - p = emit_call_and_store_retval (p, method); - p = emit_epilog (p); + p = emit_prolog (p, method, stack_size, strings); + p = emit_save_parameters (p, method, strings, runtime); + p = emit_call_and_store_retval (p, method, strings); + p = emit_epilog (p, method, strings); /* { guchar *cp; -- cgit v1.1 From 2d3dbc6213f3e12d1c7b332d80fec81384612bf8 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Thu, 24 Jan 2002 01:00:53 +0000 Subject: 2002-01-23 Miguel de Icaza * x86/tramp.c (mono_create_trampoline): Do not try to create a mono_string_new if the return value from the PInvoke code is NULL. 2002-01-23 Miguel de Icaza * genwrapper.pl: Added wrappers for the mono_glob functions. * glob.c: New file, with globbing functions used by the Directory code. svn path=/trunk/mono/; revision=2139 --- ChangeLog | 5 +++++ x86/tramp.c | 3 +++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog index 83b4f34..2be3109 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-01-23 Miguel de Icaza + + * x86/tramp.c (mono_create_trampoline): Do not try to create a + mono_string_new if the return value from the PInvoke code is + NULL. Mon Jan 14 11:50:16 CET 2002 Paolo Molaro diff --git a/x86/tramp.c index 0b45b6f..a94a863 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -264,6 +264,9 @@ enum_retvalue: break; } + /* If the argument is non-null, then convert the value back */ + x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); + x86_branch8 (p, X86_CC_EQ, 11, FALSE); x86_push_reg (p, X86_EAX); x86_mov_reg_imm (p, X86_EDX, mono_string_new); x86_call_reg (p, X86_EDX); -- cgit v1.1 From 4a977a50d70eb75760d9555854845d32595c4093 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 1 Feb 2002 11:22:35 +0000 Subject: Fri Feb 1 16:03:53 CET 2002 Paolo Molaro * interp.c: exception fixes. Use mono_method_pointer_get () to ease porting to other archs. Some support for overflow detection. Fri Feb 1 16:03:00 CET 2002 Paolo Molaro * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get ().
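The x86 implementation of mono_method_pointer_get () in the hunk below recognizes generated method pointers by a two-byte tag embedded in the code stream. The emission side is not shown in this patch, so the layout sketched here is an assumption reconstructed from the retrieval code: a short jump over the embedded data, the bytes 'M' 'o' at offsets 2-3, and the MonoMethod pointer at offset sizeof (gpointer):

	/* assumed emission in mono_create_method_pointer ():
	 *   bytes 0-1: jmp short over the embedded data
	 *   bytes 2-3: 'M', 'o' tag
	 *   bytes 4-7: the MonoMethod* itself (sizeof (gpointer) == 4 on x86)
	 */
	x86_jump8 (p, 2 + sizeof (gpointer));   /* skip tag + pointer */
	*p++ = 'M';
	*p++ = 'o';
	*(MonoMethod**)p = method;
	p += sizeof (MonoMethod*);

	/* retrieval, as in the x86/tramp.c hunk below */
	unsigned char *c = code;
	if (c [2] != 'M' || c [3] != 'o')
		return NULL;
	return *(MonoMethod**)(code + sizeof (gpointer));

The tag check is only a heuristic: any code whose third and fourth bytes happen to be 'M' 'o' would be accepted, which is evidently good enough for the interpreter's internal use.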
Fri Feb 1 16:13:20 CET 2002 Paolo Molaro * class.c: add asserts if we are ever going to scribble over memory. * socket-io.c: not all systems have AF_IRDA defined. svn path=/trunk/mono/; revision=2223 --- ChangeLog | 5 +++++ ppc/tramp.c | 11 +++++++++++ x86/tramp.c | 14 +++++++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 2be3109..7c572e7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Fri Feb 1 16:03:00 CET 2002 Paolo Molaro + + * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get (). + 2002-01-23 Miguel de Icaza * x86/tramp.c (mono_create_trampoline): Do not try to create a diff --git a/ppc/tramp.c b/ppc/tramp.c index 8d7efae..871686e 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -559,3 +559,14 @@ mono_create_method_pointer (MonoMethod *method) } +/* + * mono_create_method_pointer () will insert a pointer to the MonoMethod + * so that the interp can easily get at the data: this function will retrieve + * the method from the code stream. + */ +MonoMethod* +mono_method_pointer_get (void *code) +{ + return NULL; +} + diff --git a/x86/tramp.c b/x86/tramp.c index a94a863..93e2664 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -484,4 +484,16 @@ mono_create_method_pointer (MonoMethod *method) return g_memdup (code_buffer, p - code_buffer); } - +/* + * mono_create_method_pointer () will insert a pointer to the MonoMethod + * so that the interp can easily get at the data: this function will retrieve + * the method from the code stream. + */ +MonoMethod* +mono_method_pointer_get (void *code) +{ + unsigned char *c = code; + if (c [2] != 'M' || c [3] != 'o') + return NULL; + return *(MonoMethod**)(code + sizeof (gpointer)); +} -- cgit v1.1 From dd029fa4245c99073ae6863dcb8e1560cc1eedc0 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 1 Feb 2002 12:04:34 +0000 Subject: SHR/SHL impl. svn path=/trunk/mono/; revision=2224 --- x86/x86-codegen.h | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index affd563..0bee23e 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.20 2002/01/14 07:00:24 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.21 2002/02/01 12:04:33 dietmar Exp $ */ #ifndef X86_H @@ -566,6 +566,36 @@ typedef union { * Multi op shift missing. 
*/ +#define x86_shrd_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xad; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xac; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + x86_imm_emit8 ((inst), (shamt)); \ + } while (0) + +#define x86_shld_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xa5; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_shld_reg_imm(inst,dreg,reg,shamt) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xa4; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + x86_imm_emit8 ((inst), (shamt)); \ + } while (0) + /* * EDX:EAX = EAX * rm */ -- cgit v1.1 From d7a858a6ac5bc37435a157cf41eb63818905a7ea Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 11 Feb 2002 07:42:10 +0000 Subject: Mon Feb 11 12:32:35 CET 2002 Paolo Molaro * x86/tramp.c: fix handling of multiple marshaled strings. * x86/x86-codegen.h: some code to patch branch displacements. svn path=/trunk/mono/; revision=2308 --- ChangeLog | 5 +++++ x86/tramp.c | 3 +-- x86/x86-codegen.h | 22 +++++++++++++++++++++- 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/ChangeLog index 7c572e7..a85e865 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Feb 11 12:32:35 CET 2002 Paolo Molaro + + * x86/tramp.c: fix handling of multiple marshaled strings. + * x86/x86-codegen.h: some code to patch branch displacements. + Fri Feb 1 16:03:00 CET 2002 Paolo Molaro * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get (). diff --git a/x86/tramp.c index 93e2664..e9fef81 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -304,9 +304,8 @@ enum_retvalue: * free the allocated strings. */ if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { - if (local_size) - x86_mov_reg_imm (p, X86_EDX, g_free); for (i = 1; i <= local_size; ++i) { + x86_mov_reg_imm (p, X86_EDX, g_free); x86_push_membase (p, X86_EBP, LOC_POS * i); x86_call_reg (p, X86_EDX); } diff --git a/x86/x86-codegen.h index 0bee23e..b57e50e 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.21 2002/02/01 12:04:33 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.22 2002/02/11 07:42:10 lupus Exp $ */ #ifndef X86_H @@ -260,6 +260,26 @@ typedef union { } \ } while (0) +/* disp will need to be relative to the start position...
*/ +#define x86_patch(ins,disp) \ + do { \ + unsigned char* pos = (ins) + 1; \ + int size = 0; \ + switch (*(ins)) { \ + case 0xe9: ++size; break; \ + case 0x0f: ++size; ++pos; break; \ + case 0xeb: \ + case 0x70: case 0x71: case 0x72: case 0x73: \ + case 0x74: case 0x75: case 0x76: case 0x77: \ + case 0x78: case 0x79: case 0x7a: case 0x7b: \ + case 0x7c: case 0x7d: case 0x7e: case 0x7f: \ + break; \ + default: assert (0); \ + } \ + if (size) x86_imm_emit32 (pos, (disp)); \ + else x86_imm_emit8 (pos, (disp)); \ + } while (0) + #define x86_breakpoint(inst) \ do { \ *(inst)++ = 0xcc; \ -- cgit v1.1 From 6f7cdfa857058ee3662e1662190315c294188ae0 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 11 Feb 2002 13:49:06 +0000 Subject: Mon Feb 11 18:40:04 CET 2002 Paolo Molaro * sparc/*: sparc codegen header and some untested trampoline code. svn path=/trunk/mono/; revision=2315 --- ChangeLog | 4 + Makefile.am | 2 +- sparc/Makefile.am | 7 + sparc/sparc-codegen.h | 486 ++++++++++++++++++++++++++++++++++++++++++++++++++ sparc/test.c | 123 +++++++++++++ sparc/tramp.c | 342 +++++++++++++++++++++++++++++++++++ 6 files changed, 963 insertions(+), 1 deletion(-) create mode 100644 sparc/Makefile.am create mode 100644 sparc/sparc-codegen.h create mode 100644 sparc/test.c create mode 100644 sparc/tramp.c diff --git a/ChangeLog b/ChangeLog index a85e865..10a73f6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Mon Feb 11 18:40:04 CET 2002 Paolo Molaro + + * sparc/*: sparc codegen header and some untested trampoline code. + Mon Feb 11 12:32:35 CET 2002 Paolo Molaro * x86/tramp.c: fix handling of multiple marshaleed strings. diff --git a/Makefile.am b/Makefile.am index 36a9c0e..fc76039 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc +DIST_SUBDIRS = x86 ppc sparc INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/sparc/Makefile.am b/sparc/Makefile.am new file mode 100644 index 0000000..e0f7689 --- /dev/null +++ b/sparc/Makefile.am @@ -0,0 +1,7 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-sparc.la + +libmonoarch_sparc_la_SOURCES = tramp.c sparc-codegen.h + diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h new file mode 100644 index 0000000..04a23c3 --- /dev/null +++ b/sparc/sparc-codegen.h @@ -0,0 +1,486 @@ +#ifndef __SPARC_CODEGEN_H__ +#define __SPARC_CODEGEN_H__ + +typedef enum { + sparc_r0 = 0, + sparc_r1 = 1, + sparc_r2 = 2, + sparc_r3 = 3, + sparc_r4 = 4, + sparc_r5 = 5, + sparc_r6 = 6, + sparc_r7 = 7, + sparc_r8 = 8, + sparc_r9 = 9, + sparc_r10 = 10, + sparc_r11 = 11, + sparc_r12 = 12, + sparc_r13 = 13, + sparc_r14 = 14, + sparc_r15 = 15, + sparc_r16 = 16, + sparc_r17 = 17, + sparc_r18 = 18, + sparc_r19 = 19, + sparc_r20 = 20, + sparc_r21 = 21, + sparc_r22 = 22, + sparc_r23 = 23, + sparc_r24 = 24, + sparc_r25 = 25, + sparc_r26 = 26, + sparc_r27 = 27, + sparc_r28 = 28, + sparc_r29 = 29, + sparc_r30 = 30, + sparc_r31 = 31, + /* aliases */ + /* global registers */ + sparc_g0 = 0, sparc_zero = 0, + sparc_g1 = 1, + sparc_g2 = 2, + sparc_g3 = 3, + sparc_g4 = 4, + sparc_g5 = 5, + sparc_g6 = 6, + sparc_g7 = 7, + /* out registers */ + sparc_o0 = 8, + sparc_o1 = 9, + sparc_o2 = 10, + sparc_o3 = 11, + sparc_o4 = 12, + sparc_o5 = 13, + sparc_o6 = 14, sparc_sp = 14, + sparc_o7 = 15, sparc_callsite = 15, + /* local registers */ + sparc_l0 = 16, + sparc_l1 = 17, + sparc_l2 = 18, + sparc_l3 = 19, + sparc_l4 = 20, + sparc_l5 = 21, + sparc_l6 = 22, + sparc_l7 = 23, + /* in registers */ + sparc_i0 = 24, + 
sparc_i1 = 25, + sparc_i2 = 26, + sparc_i3 = 27, + sparc_i4 = 28, + sparc_i5 = 29, + sparc_i6 = 30, sparc_fp = 30, + sparc_i7 = 31, + sparc_nreg = 32, + /* floating point registers */ + sparc_f0 = 0, + sparc_f1 = 1, + sparc_f2 = 2, + sparc_f3 = 3, + sparc_f4 = 4, + sparc_f5 = 5, + sparc_f6 = 6, + sparc_f7 = 7, + sparc_f8 = 8, + sparc_f9 = 9, + sparc_f10 = 10, + sparc_f11 = 11, + sparc_f12 = 12, + sparc_f13 = 13, + sparc_f14 = 14, + sparc_f15 = 15, + sparc_f16 = 16, + sparc_f17 = 17, + sparc_f18 = 18, + sparc_f19 = 19, + sparc_f20 = 20, + sparc_f21 = 21, + sparc_f22 = 22, + sparc_f23 = 23, + sparc_f24 = 24, + sparc_f25 = 25, + sparc_f26 = 26, + sparc_f27 = 27, + sparc_f28 = 28, + sparc_f29 = 29, + sparc_f30 = 30, + sparc_f31 = 31, +} SparcRegister; + +typedef enum { + sparc_bn = 0, sparc_bnever = 0, + sparc_be = 1, + sparc_ble = 2, + sparc_bl = 3, + sparc_bleu = 4, + sparc_bcs = 5, sparc_blu = 5, + sparc_bneg = 6, + sparc_bvs = 7, sparc_boverflow = 7, + sparc_ba = 8, sparc_balways = 8, + sparc_bne = 9, + sparc_bg = 10, + sparc_bge = 11, + sparc_bgu = 12, + sparc_bcc = 13, sparc_beu = 13, + sparc_bpos = 14, + sparc_bvc = 15 +} SparcCond; + +typedef enum { + /* with fcmp */ + sparc_feq = 0, + sparc_fl = 1, + sparc_fg = 2, + sparc_unordered = 3, + /* branch ops */ + sparc_fba = 8, + sparc_fbn = 0, + sparc_fbu = 7, + sparc_fbg = 6, + sparc_fbug = 5, + sparc_fbl = 4, + sparc_fbul = 3, + sparc_fblg = 2, + sparc_fbne = 1, + sparc_fbe = 9, + sparc_fbue = 10, + sparc_fbge = 11, + sparc_fbuge = 12, + sparc_fble = 13, + sparc_fbule = 14, + sparc_fbo = 15 +} SparcFCond; + +typedef enum { + /* fop1 format */ + sparc_fitos = 196, + sparc_fitod = 200, + sparc_fstoi = 209, + sparc_fdtoi = 210, + sparc_fstod = 201, + sparc_fdtos = 198, + sparc_fmov = 1, + sparc_fneg = 5, + sparc_fabs = 9, + sparc_fsqrts = 41, + sparc_fsqrtd = 42, + sparc_fadds = 65, + sparc_faddd = 66, + sparc_fsubs = 69, + sparc_fsubd = 70, + sparc_fmuls = 73, + sparc_fmuld = 74, + sparc_fdivs = 77, + sparc_fdivd = 78, + /* fop2 format */ + sparc_fcmps = 81, + sparc_fcmpd = 82 +} SparcFOp; + +typedef struct { + unsigned int op : 2; /* always 1 */ + unsigned int disp : 30; +} sparc_format1; + +typedef struct { + unsigned int op : 2; /* always 0 */ + unsigned int rd : 5; + unsigned int op2 : 3; + unsigned int disp : 22; +} sparc_format2a; + +typedef struct { + unsigned int op : 2; /* always 0 */ + unsigned int a : 1; + unsigned int cond : 4; + unsigned int op2 : 3; + unsigned int disp : 22; +} sparc_format2b; + +typedef struct { + unsigned int op : 2; /* 2 or 3 */ + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int i : 1; + unsigned int asi : 8; + unsigned int rs2 : 5; +} sparc_format3a; + +typedef struct { + unsigned int op : 2; /* 2 or 3 */ + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int i : 1; + unsigned int imm : 13; +} sparc_format3b; + +typedef struct { + unsigned int op : 2; /* 2 or 3 */ + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int opf : 9; + unsigned int rs2 : 5; +} sparc_format3c; + +/* for use in logical ops, use 0 to not set flags */ +#define sparc_cc 16 + +#define sparc_encode_call(ins,addr) \ + do { \ + sparc_format1 *__f = (sparc_format1*)(ins); \ + __f->op = 1; \ + __f->disp = ((unsigned int)(addr) >> 2); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format2a(ins,val,oper,dest) \ + do { \ + sparc_format2a *__f = (sparc_format2a*)(ins); \ + __f->op = 0; \ + __f->rd = (dest); \ + __f->op2 
= (oper); \ + __f->disp = (val) & 0x3fffff; \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format2b(ins,aval,bcond,oper,disp22) \ + do { \ + sparc_format2b *__f = (sparc_format2b*)(ins); \ + __f->op = 0; \ + __f->a = (aval); \ + __f->cond = (bcond); \ + __f->op2 = (oper); \ + __f->disp = (disp22); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format3a(ins,opval,asival,r1,r2,oper,dest) \ + do { \ + sparc_format3a *__f = (sparc_format3a*)(ins); \ + __f->op = (opval); \ + __f->asi = (asival); \ + __f->i = 0; \ + __f->rd = (dest); \ + __f->rs1 = (r1); \ + __f->rs2 = (r2); \ + __f->op3 = (oper); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format3b(ins,opval,r1,val,oper,dest) \ + do { \ + sparc_format3b *__f = (sparc_format3b*)(ins); \ + __f->op = (opval); \ + __f->imm = (val); \ + __f->i = 1; \ + __f->rd = (dest); \ + __f->rs1 = (r1); \ + __f->op3 = (oper); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format3c(ins,opval,opfval,r1,oper,r2,dest) \ + do { \ + sparc_format3c *__f = (sparc_format3c*)(ins); \ + __f->op = (opval); \ + __f->opf = (opfval); \ + __f->rd = (dest); \ + __f->rs1 = (r1); \ + __f->rs2 = (r2); \ + __f->op3 = (oper); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +/* is it useful to provide a non-default value? */ +#define sparc_asi 0x0 + +/* load */ +#define sparc_ldsb(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),9,(dest)) +#define sparc_ldsb_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),9,(dest)) + +#define sparc_ldsh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),10,(dest)) +#define sparc_ldsh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),10,(dest)) + +#define sparc_ldub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),1,(dest)) +#define sparc_ldub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),1,(dest)) + +#define sparc_lduh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),2,(dest)) +#define sparc_lduh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),2,(dest)) + +#define sparc_ld(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),0,(dest)) +#define sparc_ld_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),0,(dest)) + +#define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest)) +#define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest)) + +#define sparc_ldf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),32,(dest)) +#define sparc_ldf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),32,(dest)) + +#define sparc_lddf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),35,(dest)) +#define sparc_lddf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),35,(dest)) + +/* store */ +#define sparc_stb(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),5,(src)) +#define sparc_stb_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),5,(src)) + +#define sparc_sth(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),6,(src)) +#define sparc_sth_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),6,(src)) + +#define sparc_st(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),4,(src)) +#define 
sparc_st_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),4,(src)) + +#define sparc_std(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),7,(src)) +#define sparc_std_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),7,(src)) + +#define sparc_stf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),36,(src)) +#define sparc_stf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),36,(src)) + +#define sparc_stdf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),39,(src)) +#define sparc_stdf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),39,(src)) + +/* swap */ +#define sparc_ldstub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),13,(dest)) +#define sparc_ldstub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),13,(dest)) + +#define sparc_swap(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),15,(dest)) +#define sparc_swap_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),15,(dest)) + +/* misc */ +/* note: with sethi val is the full 32 bit value (think of it as %hi(val)) */ +#define sparc_sethi(ins,val,dest) sparc_encode_format2a((ins),((val)>>10),4,(dest)) + +#define sparc_nop(ins) sparc_sethi((ins),0,sparc_zero) + +#define sparc_save(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),60,(dest)) +#define sparc_save_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),60,(dest)) + +#define sparc_restore(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),61,(dest)) +#define sparc_restore_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),61,(dest)) + +#define sparc_jmpl(ins,base,disp,dest) sparc_encode_format3a((ins),2,0,(base),(disp),56,(dest)) +#define sparc_jmpl_imm(ins,base,disp,dest) sparc_encode_format3b((ins),2,(base),(disp),56,(dest)) + +#define sparc_call_simple(ins,addr) sparc_encode_call((ins),(addr)) + +#define sparc_rdy(ins,dest) sparc_encode_format3a((ins),2,0,0,0,40,(dest)) + +#define sparc_wry(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),48,0) +#define sparc_wry_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),48,0) + +/* stbar, unimp, flush */ +#define sparc_stbar(ins) sparc_encode_format3a((ins),2,0,15,0,40,0) +#define sparc_unimp(ins,val) sparc_encode_format2b((ins),0,0,0,(val)) + +#define sparc_flush(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),59,0) +#define sparc_flush_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),59,0) + +/* trap */ + +/* alu fop */ +/* provide wrappers for: fitos, fitod, fstoi, fdtoi, fstod, fdtos, fmov, fneg, fabs */ + +#define sparc_fop(ins,r1,op,r2,dest) sparc_encode_format3c((ins),2,(op),(r1),52,(r2),(dest)) +#define sparc_fcmp(ins,r1,op,r2) sparc_encode_format3c((ins),2,(op),(r1),53,(r2),0) + +/* logical */ +#define sparc_and(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|1,(dest)) +#define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|1,(dest)) + +#define sparc_andn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|5,(dest)) +#define sparc_andn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|5,(dest)) + +#define sparc_or(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|2,(dest)) +#define sparc_or_imm(ins,setcc,r1,imm,dest)
sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|2,(dest)) + +#define sparc_orn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|6,(dest)) +#define sparc_orn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|6,(dest)) + +#define sparc_xor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|3,(dest)) +#define sparc_xor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|3,(dest)) + +#define sparc_xnor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|7,(dest)) +#define sparc_xnor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|7,(dest)) + +/* shift */ +#define sparc_sll(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),37,(dest)) +#define sparc_sll_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),37,(dest)) + +#define sparc_srl(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),38,(dest)) +#define sparc_srl_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),38,(dest)) + +#define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest)) +#define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest)) + +/* alu */ +#define sparc_add(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|0,(dest)) +#define sparc_add_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|0,(dest)) + +#define sparc_addx(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|8,(dest)) +#define sparc_addx_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|8,(dest)) + +#define sparc_sub(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|4,(dest)) +#define sparc_sub_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|4,(dest)) + +#define sparc_subx(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|12,(dest)) +#define sparc_subx_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|12,(dest)) + +#define sparc_muls(ins,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),36,(dest)) +#define sparc_muls_imm(ins,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),36,(dest)) + +#define sparc_umul(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|10,(dest)) +#define sparc_umul_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|10,(dest)) + +#define sparc_smul(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|11,(dest)) +#define sparc_smul_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|11,(dest)) + +#define sparc_udiv(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|14,(dest)) +#define sparc_udiv_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|14,(dest)) + +#define sparc_sdiv(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|15,(dest)) +#define sparc_sdiv_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|15,(dest)) + + +/* branch */ +#define sparc_branch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),2,(displ)) +/* FIXME: float condition codes are different: unify.
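 * Both the integer and the float variants expand to a single format2b
 * word. As a usage sketch (an example, with disp standing in for a
 * real displacement):
 *
 *   sparc_branch (p, 0, sparc_bne, disp);   // bne, disp in instructions
 *   sparc_nop (p);                          // delay slot still executes
 *
 * The target is the branch's own PC plus 4*disp, and a nonzero aval
 * sets the annul bit, which squashes the delay slot when a conditional
 * branch is not taken.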
*/ +#define sparc_fbranch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),6,(displ)) + +/* synthetic instructions */ +#define sparc_cmp(ins,r1,r2) sparc_sub((ins),sparc_cc,(r1),(r2),sparc_g0) +#define sparc_cmp_imm(ins,r1,imm) sparc_sub_imm((ins),sparc_cc,(r1),(imm),sparc_g0) + +#define sparc_jmp(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_g0) +#define sparc_jmp_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_g0) +#define sparc_call(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_o7) +#define sparc_call_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_o7) + +#define sparc_test(ins,reg) sparc_or ((ins),sparc_cc,sparc_g0,(reg),sparc_g0) + +#define sparc_ret(ins) sparc_jmpl_imm((ins),sparc_i7,8,sparc_g0) +#define sparc_retl(ins) sparc_jmpl_imm((ins),sparc_o7,8,sparc_g0) +#define sparc_restore_simple(ins) sparc_restore((ins),sparc_g0,sparc_g0,sparc_g0) + +#define sparc_set(ins,val,reg) \ + do { \ + if (((val) & 0x1fff) == 0) \ + sparc_sethi((ins),(val),(reg)); \ + else if (((val) >= -4096) && ((val) <= 4095)) \ + sparc_or_imm((ins),FALSE,sparc_g0,(val),(reg)); \ + else { \ + sparc_sethi((ins),(val),(reg)); \ + sparc_or_imm((ins),FALSE,(reg),(val)&0x3ff,(reg)); \ + } \ + } while (0) + +#define sparc_not(ins,reg) sparc_xnor((ins),FALSE,(reg),sparc_g0,(reg)) +#define sparc_neg(ins,reg) sparc_sub((ins),FALSE,sparc_g0,(reg),(reg)) +#define sparc_clr_reg(ins,reg) sparc_or((ins),FALSE,sparc_g0,sparc_g0,(reg)) + +#define sparc_mov_reg_reg(ins,src,dest) sparc_or_imm((ins),FALSE,(src),0,(dest)) + + +#endif /* __SPARC_CODEGEN_H__ */ + diff --git a/sparc/test.c b/sparc/test.c new file mode 100644 index 0000000..0d4ad18 --- /dev/null +++ b/sparc/test.c @@ -0,0 +1,123 @@ +#include +#include "sparc-codegen.h" + +/* don't run the resulting program, it will destroy your computer, + * just objdump -d it to inspect we generated the correct assembler. + */ + +int +main () +{ + guint32 *p; + guint32 code_buffer [500]; + guint32 local_size = 0, stack_size = 0, code_size = 6; + guint32 arg_pos, simpletype; + unsigned char *ins; + int i, stringp, cur_out_reg, size; + + p = code_buffer; + + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + + /* + * Standard function prolog. 
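 * save slides the register window and allocates the stack frame in a
 * single instruction, so the immediate must cover at least the SPARC
 * ABI minimum frame: 16 register-save words, one hidden-parameter
 * word and six outgoing-argument words (92 bytes, rounded up for
 * alignment). The -112-stack_size used below leaves a little slack
 * on top of that minimum.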
+ */ + sparc_save_imm (p, sparc_sp, -112-stack_size, sparc_sp); + cur_out_reg = sparc_o0; + arg_pos = 0; + + if (1) { + sparc_mov_reg_reg (p, sparc_i2, cur_out_reg); + ++cur_out_reg; + } + + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + ++cur_out_reg; + sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg); + ++cur_out_reg; + /* + * Insert call to function + */ + sparc_jmpl (p, sparc_i0, 0, sparc_callsite); + sparc_nop (p); + + sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); + sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); + + sparc_ldsb (p, sparc_i3, sparc_l0, sparc_o5); + sparc_ldsb_imm (p, sparc_i3, 2, sparc_o5); + + sparc_ldsh (p, sparc_i3, sparc_l0, sparc_o5); + sparc_ldsh_imm (p, sparc_i3, 2, sparc_o5); + + sparc_ldub (p, sparc_i3, sparc_l0, sparc_o5); + sparc_ldub_imm (p, sparc_i3, 2, sparc_o5); + + sparc_lduh (p, sparc_i3, sparc_l0, sparc_o5); + sparc_lduh_imm (p, sparc_i3, 2, sparc_o5); + + sparc_ldf (p, sparc_i3, sparc_l0, sparc_o5); + sparc_ldf_imm (p, sparc_i3, 2, sparc_o5); + + sparc_stb (p, sparc_i3, sparc_l0, sparc_l2); + sparc_stb_imm (p, sparc_i3, sparc_o5, 2); + + sparc_sethi (p, 0xff000000, sparc_o2); + sparc_rdy (p, sparc_l0); + sparc_wry (p, sparc_l0, sparc_l1); + sparc_wry_imm (p, sparc_l0, 16); + sparc_stbar (p); + sparc_unimp (p, 24); + sparc_flush (p, sparc_l4, 0); + + sparc_and (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_and_imm (p, FALSE, sparc_l0, 0xff, sparc_o1); + sparc_andn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_or (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_orn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_xor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_xnor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + + sparc_sll (p, sparc_l0, sparc_l1, sparc_o1); + sparc_sll_imm (p, sparc_l0, 2, sparc_o1); + sparc_srl (p, sparc_l0, sparc_l1, sparc_o1); + sparc_srl_imm (p, sparc_l0, 2, sparc_o1); + sparc_sra (p, sparc_l0, sparc_l1, sparc_o1); + sparc_sra_imm (p, sparc_l0, 2, sparc_o1); + + sparc_add (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_add_imm (p, FALSE, sparc_l0, 0xff, sparc_o1); + sparc_addx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_sub (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_subx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + + sparc_muls (p, sparc_l0, sparc_l1, sparc_o1); + sparc_umul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_smul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_udiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + sparc_sdiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1); + + sparc_branch (p, FALSE, sparc_bne, -12); + sparc_ret (p); + sparc_retl (p); + sparc_test (p, sparc_l4); + sparc_cmp (p, sparc_l4, sparc_l6); + sparc_cmp_imm (p, sparc_l4, 4); + sparc_restore_simple (p); + + sparc_set (p, 0xff000000, sparc_l7); + sparc_set (p, 1, sparc_l7); + sparc_set (p, 0xff0000ff, sparc_l7); + + sparc_not (p, sparc_g2); + sparc_neg (p, sparc_g3); + sparc_clr_reg (p, sparc_g4); + + + size = (p-code_buffer)*4; + ins = (gchar*)code_buffer; + for (i = 0; i < size; ++i) + printf (".byte %d\n", (unsigned int) ins [i]); + return 0; +} + diff --git a/sparc/tramp.c b/sparc/tramp.c new file mode 100644 index 0000000..ded60e6 --- /dev/null +++ b/sparc/tramp.c @@ -0,0 +1,342 @@ + +/* + * Create trampolines to invoke arbitrary functions. + * + * Copyright (C) Ximian Inc. 
+ * + * Author: Paolo Molaro (lupus@ximian.com) + * + */ + +#include "config.h" +#include +#include "sparc-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" + +/* + * The resulting function takes the form: + * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); + */ +#define FUNC_ADDR_POS sparc_i0 +#define RETVAL_POS sparc_i1 +#define THIS_POS sparc_i2 +#define ARGP_POS sparc_i3 +#define LOC_POS -4 + +#define ARG_SIZE sizeof (stackval) + +MonoPIFunc +mono_create_trampoline (MonoMethod *method, int runtime) +{ + MonoMethodSignature *sig; + guint32 *p, *code_buffer; + guint32 local_size = 0, stack_size = 0, code_size = 6; + guint32 arg_pos, simpletype; + int i, stringp, cur_out_reg; + + sig = method->signature; + + if (sig->hasthis) + code_size ++; + + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + stack_size += sizeof (gpointer); + code_size += i < 6 ? 1 : 3; + continue; + } + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_R4: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + stack_size += 4; + code_size += i < 6 ? 1 : 3; + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) + g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); + stack_size += 4; + code_size += i < 6 ? 1 : 3; + break; + case MONO_TYPE_STRING: + stack_size += 4; + code_size += 5; + local_size++; + break; + case MONO_TYPE_I8: + stack_size += 8; + code_size += i < 6 ? 2 : 3; + break; + case MONO_TYPE_R8: + stack_size += 8; + code_size += i < 6 ? 2 : 3; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + /* + * FIXME: take into account large return values. + */ + + code_buffer = p = alloca (code_size * 4); + cur_out_reg = sparc_o0; + + /* + * Standard function prolog. + */ + sparc_save_imm (p, sparc_sp, -112-stack_size, sparc_sp); + /* + * We store some local vars here to handle string pointers. + * and align to 16 byte boundary... + */ +#if 0 + if (local_size) { + x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size * 4); + stack_size = (stack_size * local_size * 4) % 16; + } else { + stack_size = stack_size % 16; + } + if (stack_size) + x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); +#endif + + /* + * %i3 has the pointer to the args. + */ + + if (sig->hasthis) { + sparc_mov_reg_reg (p, sparc_i2, cur_out_reg); + ++cur_out_reg; + } + + /* + * Push arguments in reverse order. 
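 * (Despite the wording, the loop below actually walks the stackval
 * array forward: argument i sits at ARG_SIZE * i from the args
 * pointer in %i3 and is copied into the next free out register,
 * i.e. roughly
 *
 *   sparc_ld_imm (p, sparc_i3, ARG_SIZE * i, cur_out_reg);
 *   ++cur_out_reg;
 *
 * per 32-bit argument, with 64-bit values taking two loads and two
 * out registers.)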
+ */ + stringp = 0; + for (i = 0; i < sig->param_count; ++i) { + arg_pos = ARG_SIZE * i; + if (sig->params [i]->byref) { + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + ++cur_out_reg; + continue; + } + simpletype = sig->params [i]->type; +enum_marshal: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + ++cur_out_reg; + break; + case MONO_TYPE_VALUETYPE: + if (!sig->params [i]->data.klass->enumtype) { + /* it's a structure that fits in 4 bytes, need to push the value pointed to */ + /*x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_push_regp (p, X86_EAX);*/ + g_assert (0); + } else { + /* it's an enum value */ + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_marshal; + } + break; + case MONO_TYPE_R8: + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + ++cur_out_reg; + sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg); + ++cur_out_reg; + break; +#if 0 + case MONO_TYPE_STRING: + /* + * If it is an internalcall we assume it's the object we want. + * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. + */ + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + x86_push_membase (p, X86_EDX, arg_pos); + break; + } + /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ + x86_push_membase (p, X86_EDX, arg_pos); + x86_mov_reg_imm (p, X86_EDX, mono_string_to_utf8); + x86_call_reg (p, X86_EDX); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); + x86_push_reg (p, X86_EAX); + /* + * Store the pointer in a local we'll free later. + */ + stringp++; + x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); + /* + * we didn't save the reg: restore it here. + */ + if (i > 1) + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); + break; +#endif + case MONO_TYPE_I8: + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + ++cur_out_reg; + sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg); + ++cur_out_reg; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + /* + * Insert call to function + */ + sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite); + sparc_nop (p); + + /* + * Handle retval. + * Small integer and pointer values are in EAX. + * Long integers are in EAX:EDX. + * FP values are on the FP stack. 
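 * (Those are x86 conventions; on SPARC v8 an integer or pointer
 * result comes back in %o0, a 64-bit result in the %o0/%o1 pair and
 * a floating point result in %f0/%f1, which is what a SPARC
 * replacement for the disabled block below would have to store
 * through the retval pointer.)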
+ */ +#if 0 + if (sig->ret->byref) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 1); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 2); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + case MONO_TYPE_STRING: + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + } + + /* If the argument is non-null, then convert the value back */ + x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); + x86_branch8 (p, X86_CC_EQ, 11, FALSE); + x86_push_reg (p, X86_EAX); + x86_mov_reg_imm (p, X86_EDX, mono_string_new); + x86_call_reg (p, X86_EDX); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); + + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + break; + case MONO_TYPE_R4: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_fst_membase (p, X86_ECX, 0, FALSE, TRUE); + break; + case MONO_TYPE_R8: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_fst_membase (p, X86_ECX, 0, TRUE, TRUE); + break; + case MONO_TYPE_I8: + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + x86_mov_membase_reg (p, X86_ECX, 4, X86_EDX, 4); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } +#endif + /* + * free the allocated strings. + */ +#if 0 + if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { + if (local_size) + x86_mov_reg_imm (p, X86_EDX, g_free); + for (i = 1; i <= local_size; ++i) { + x86_push_membase (p, X86_EBP, LOC_POS * i); + x86_call_reg (p, X86_EDX); + } + } +#endif + /* + * Standard epilog. + * 8 may be 12 when returning structures (to skip unimp opcode). 
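 * The jmpl %i7+8, %g0 emitted below is the conventional ret: call
 * leaves its own address in %o7 (our %i7 after the save), so +8 skips
 * the call and its delay slot, while the restore sitting in the jmpl
 * delay slot pops the register window. A caller expecting a struct
 * return plants an unimp word right after its call's delay slot,
 * hence the +12 case.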
+ */ + sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); + sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); + + /* FIXME: need to flush */ + return g_memdup (code_buffer, 4 * (p - code_buffer)); +} + +void * +mono_create_method_pointer (MonoMethod *method) +{ + return NULL; +} + +MonoMethod* +mono_method_pointer_get (void *code) +{ + return NULL; +} -- cgit v1.1 From 0ffc7e417ee15973120c4f3a0cb0f2732c5c6633 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Mon, 11 Feb 2002 22:48:46 +0000 Subject: More svn path=/trunk/mono/; revision=2341 --- sparc/.cvsignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 sparc/.cvsignore diff --git a/sparc/.cvsignore b/sparc/.cvsignore new file mode 100644 index 0000000..282522d --- /dev/null +++ b/sparc/.cvsignore @@ -0,0 +1,2 @@ +Makefile +Makefile.in -- cgit v1.1 From 6b6716c9eaa66549c9c1cf86934a54a830afc1b6 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 13 Feb 2002 08:29:02 +0000 Subject: pass the domain to mono_string_new svn path=/trunk/mono/; revision=2365 --- x86/tramp.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/x86/tramp.c b/x86/tramp.c index e9fef81..1408f41 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -13,6 +13,7 @@ #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" /* * The resulting function takes the form: @@ -26,6 +27,14 @@ #define ARG_SIZE sizeof (stackval) +MonoString* +mono_string_new_wrapper (const char *text) +{ + MonoDomain *domain = mono_domain_get (); + + return mono_string_new (domain, text); +} + MonoPIFunc mono_create_trampoline (MonoMethod *method, int runtime) { @@ -268,7 +277,7 @@ enum_retvalue: x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); x86_branch8 (p, X86_CC_EQ, 11, FALSE); x86_push_reg (p, X86_EAX); - x86_mov_reg_imm (p, X86_EDX, mono_string_new); + x86_mov_reg_imm (p, X86_EDX, mono_string_new_wrapper); x86_call_reg (p, X86_EDX); x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); -- cgit v1.1 From c6fd0cb7010239a29091a50aa5354e96f74bedf2 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 13 Feb 2002 12:22:52 +0000 Subject: added some docu svn path=/trunk/mono/; revision=2372 --- x86/tramp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x86/tramp.c b/x86/tramp.c index 1408f41..944922e 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -27,7 +27,7 @@ #define ARG_SIZE sizeof (stackval) -MonoString* +static MonoString* mono_string_new_wrapper (const char *text) { MonoDomain *domain = mono_domain_get (); -- cgit v1.1 From 2cee2566ae50aa32e13864135260e16fd21bfac1 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sun, 17 Feb 2002 19:41:12 +0000 Subject: 2002-02-17 Radek Doulik * ppc/tramp.c: fixed minimal stack size, fixed string parameters, fix byte and half word parameters * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth svn path=/trunk/mono/; revision=2460 --- ppc/ppc-codegen.h | 3 +++ ppc/tramp.c | 61 ++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 624f088..9c09b66 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -88,7 +88,10 @@ typedef enum { #define ppc_li(c,D,v) ppc_addi (c, D, 0, v); #define ppc_lis(c,D,v) ppc_addis (c, D, 0, v); #define ppc_lwz(c,D,d,a) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_lhz(c,D,d,a) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) +#define 
ppc_lbz(c,D,d,a) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_stw(c,S,d,a) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_sth(c,S,d,a) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_stb(c,S,d,a) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_stwu(c,s,d,a) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) diff --git a/ppc/tramp.c b/ppc/tramp.c index 871686e..4fda42d 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -58,7 +58,7 @@ flush_icache (guint8 *code, guint size) #define PROLOG_INS 8 #define CALL_INS 2 #define EPILOG_INS 6 -#define MINIMAL_STACK_SIZE 4 +#define MINIMAL_STACK_SIZE 5 #define FLOAT_REGS 8 #define GENERAL_REGS 8 @@ -171,6 +171,9 @@ enum_retvalue: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -228,8 +231,8 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) /* handle our parameters */ if (strings) { - ppc_stw (p, ppc_r30, 16, ppc_r1); - ppc_stw (p, ppc_r29, 20, ppc_r1); + ppc_stw (p, ppc_r30, stack_size - 16, ppc_r1); + ppc_stw (p, ppc_r29, stack_size - 12, ppc_r1); if (method->signature->hasthis) { ppc_stw (p, ppc_r28, 24, ppc_r1); } @@ -255,9 +258,23 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) } else { \ NOT_IMPLEMENTED("save on stack"); \ } +#define SAVE_2_IN_GENERIC_REGISTER \ + if (gr < GENERAL_REGS) { \ + ppc_lhz (p, ppc_r3 + gr, i*16, ARG_BASE); \ + gr ++; \ + } else { \ + NOT_IMPLEMENTED("save on stack"); \ + } +#define SAVE_1_IN_GENERIC_REGISTER \ + if (gr < GENERAL_REGS) { \ + ppc_lbz (p, ppc_r3 + gr, i*16, ARG_BASE); \ + gr ++; \ + } else { \ + NOT_IMPLEMENTED("save on stack"); \ + } inline static guint8* -emit_save_parameters (guint8 *p, MonoMethod *method, guint strings, gint runtime) +emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) { MonoMethodSignature *sig; guint i, fr, gr, act_strs; @@ -269,13 +286,14 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint strings, gint runtime if (strings) { for (i = 0; i < sig->param_count; ++i) { - if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING) { + if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING + && !((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)) { ppc_lis (p, ppc_r0, (guint32) mono_string_to_utf8 >> 16); ppc_lwz (p, ppc_r3, i*16, ppc_r30); ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_to_utf8 & 0xffff); ppc_mtlr (p, ppc_r0); ppc_blrl (p); - ppc_stw (p, ppc_r3, 24 + act_strs, ppc_r31); + ppc_stw (p, ppc_r3, stack_size - 20 - act_strs, ppc_r31); act_strs += 4; } } @@ -296,11 +314,15 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint strings, gint runtime enum_calc_size: switch (simpletype) { case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: + SAVE_1_IN_GENERIC_REGISTER; + break; case MONO_TYPE_I2: case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + SAVE_2_IN_GENERIC_REGISTER; + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -332,9 +354,9 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint strings, gint runtime SAVE_4_IN_GENERIC_REGISTER; } else { if (gr < 8) { - 
ppc_lwz (p, ppc_r3 + gr, 24 + act_strs * 4, ppc_r31); + ppc_lwz (p, ppc_r3 + gr, stack_size - 20 - act_strs, ppc_r31); gr ++; - act_strs ++; + act_strs += 4; } else NOT_IMPLEMENTED ("string on stack"); } @@ -416,6 +438,12 @@ enum_retvalue: ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ ppc_stb (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_sth (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -460,7 +488,7 @@ enum_retvalue: } static inline guint8 * -emit_epilog (guint8 *p, MonoMethod *method, guint strings) +emit_epilog (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gboolean runtime) { if (strings) { MonoMethodSignature *sig = method->signature; @@ -469,9 +497,10 @@ emit_epilog (guint8 *p, MonoMethod *method, guint strings) /* free allocated memory */ act_strs = 0; for (i = 0; i < sig->param_count; ++i) { - if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING) { + if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING + && !((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)) { ppc_lis (p, ppc_r0, (guint32) g_free >> 16); - ppc_lwz (p, ppc_r3, 24 + act_strs, ppc_r31); + ppc_lwz (p, ppc_r3, stack_size - 20 - act_strs, ppc_r31); ppc_ori (p, ppc_r0, ppc_r0, (guint32) g_free & 0xffff); ppc_mtlr (p, ppc_r0); ppc_blrl (p); @@ -480,8 +509,8 @@ emit_epilog (guint8 *p, MonoMethod *method, guint strings) } /* restore volatile registers */ - ppc_lwz (p, ppc_r30, 16, ppc_r1); - ppc_lwz (p, ppc_r29, 20, ppc_r1); + ppc_lwz (p, ppc_r30, stack_size - 16, ppc_r1); + ppc_lwz (p, ppc_r29, stack_size - 12, ppc_r1); if (method->signature->hasthis) { ppc_lwz (p, ppc_r28, 24, ppc_r1); } @@ -509,9 +538,9 @@ mono_create_trampoline (MonoMethod *method, int runtime) p = code_buffer = alloc_code_memory (code_size); p = emit_prolog (p, method, stack_size, strings); - p = emit_save_parameters (p, method, strings, runtime); + p = emit_save_parameters (p, method, stack_size, strings, runtime); p = emit_call_and_store_retval (p, method, strings); - p = emit_epilog (p, method, strings); + p = emit_epilog (p, method, stack_size, strings, runtime); /* { guchar *cp; -- cgit v1.1 From b7fa0baa6c15d3ee14a1b67dd5b56d21a931894b Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sun, 17 Feb 2002 20:02:39 +0000 Subject: (mono_string_new_wrapper): new helper function, cut&pasted from x86, modified to check for NULL text to avoid branching in generated code (calculate_sizes): updated for string retval changes (emit_call_and_store_retval): updated for string retval svn path=/trunk/mono/; revision=2461 --- ppc/tramp.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 4fda42d..3db1134 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -11,6 +11,7 @@ #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" #ifdef NEED_MPROTECT #include @@ -184,8 +185,13 @@ enum_retvalue: case MONO_TYPE_R8: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: + *code_size += 8; + break; case MONO_TYPE_STRING: *code_size += 8; + if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { + *code_size += 16; + } break; case MONO_TYPE_I8: *code_size += 12; @@ -414,8 
+420,14 @@ alloc_code_memory (guint code_size) return p; } +static MonoString* +mono_string_new_wrapper (const char *text) +{ + return text ? mono_string_new (mono_domain_get (), text) : NULL; +} + static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint strings) +emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint strings, gint runtime) { MonoMethodSignature *sig = method->signature; guint32 simpletype; @@ -452,9 +464,20 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; case MONO_TYPE_STRING: + if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { + ppc_lis (p, ppc_r0, (guint32) mono_string_new_wrapper >> 16); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_new_wrapper & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + } + ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; case MONO_TYPE_R4: ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ @@ -539,7 +562,7 @@ mono_create_trampoline (MonoMethod *method, int runtime) p = code_buffer = alloc_code_memory (code_size); p = emit_prolog (p, method, stack_size, strings); p = emit_save_parameters (p, method, stack_size, strings, runtime); - p = emit_call_and_store_retval (p, method, strings); + p = emit_call_and_store_retval (p, method, strings, runtime); p = emit_epilog (p, method, stack_size, strings, runtime); /* { -- cgit v1.1 From 1da21d342a98bedfc9295846080043d8946f4029 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sun, 17 Feb 2002 21:10:29 +0000 Subject: la la la, ChangeLog entries svn path=/trunk/mono/; revision=2463 --- ChangeLog | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ChangeLog b/ChangeLog index 10a73f6..94e1f8a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,18 @@ +2002-02-17 Radek Doulik + + * ppc/tramp.c: fixed minimal stack size, fixed string parameters, + fix byte and half word parameters + (mono_string_new_wrapper): new helper function, cut&pasted from + x86, modified to check for NULL text to avoid branching in + generated code + (calculate_sizes): updated for string retval changes + (emit_call_and_store_retval): updated for string retval + + * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth + +2002-02-16 Radek Doulik + + * ppc/tramp.c (emit_call_and_store_retval): support U2, I2, CHAR Mon Feb 11 18:40:04 CET 2002 Paolo Molaro -- cgit v1.1 From e756cc154586ebdd6f4bba8b730fca09611874cf Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 19 Feb 2002 15:40:57 +0000 Subject: Tue Feb 19 20:19:38 CET 2002 Paolo Molaro * x86/tramp.c: avoid pointer arithmetic (pointed out by Serge). Tue Feb 19 20:20:15 CET 2002 Paolo Molaro * dump.c: the prolog is before each arg in the custom attribute blob. svn path=/trunk/mono/; revision=2513 --- ChangeLog | 5 +++++ x86/tramp.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 94e1f8a..05dcf12 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Tue Feb 19 20:19:38 CET 2002 Paolo Molaro + + * x86/tramp.c: avoid pointer arithmetic (pointed out by Serge).
+ 2002-02-17 Radek Doulik * ppc/tramp.c: fixed minimal stack size, fixed string parameters, diff --git a/x86/tramp.c b/x86/tramp.c index 944922e..8c861cd 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -503,5 +503,5 @@ mono_method_pointer_get (void *code) unsigned char *c = code; if (c [2] != 'M' || c [3] != 'o') return NULL; - return *(MonoMethod**)(code + sizeof (gpointer)); + return *(MonoMethod**)(c + sizeof (gpointer)); } -- cgit v1.1 From 5dbc4bd3639f2d012a1103ae1b0f911768e460ab Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Tue, 19 Feb 2002 19:49:10 +0000 Subject: 2002-02-19 Radek Doulik * ppc/tramp.c (emit_save_parameters): don't start saving 64bit values to even registers svn path=/trunk/mono/; revision=2519 --- ChangeLog | 4 ++++ ppc/tramp.c | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 05dcf12..dee1900 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-02-19 Radek Doulik + + * ppc/tramp.c (emit_save_parameters): don't start saving 64bit values to + even registers Tue Feb 19 20:19:38 CET 2002 Paolo Molaro diff --git a/ppc/tramp.c b/ppc/tramp.c index 3db1134..41e05f8 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -21,8 +21,8 @@ #endif #endif -/* void -fake_func (gdouble (*callme)(), stackval *retval, void *this_obj, stackval *arguments) +/* gpointer +fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) { guint32 i = 0xc002becd; @@ -30,6 +30,8 @@ fake_func (gdouble (*callme)(), stackval *retval, void *this_obj, stackval *argu *(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); *(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); + + return (gpointer) (*callme) (((MonoType *)arguments [0]. 
data.p)->data.klass); } */ #define MIN_CACHE_LINE 8 @@ -80,6 +82,8 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } else { *code_size += 16; /* 2x load from stack */ } + if ((*gr) && 1) + (*gr) ++; (*gr) ++; } (*gr) ++; @@ -369,6 +373,8 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str break; case MONO_TYPE_I8: if (gr < 7) { + if (gr & 1) + gr ++; g_warning ("check endianess"); ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); gr ++; -- cgit v1.1 From 725e90ef0e13752e357358ddef152a30beae174f Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Tue, 19 Feb 2002 20:50:13 +0000 Subject: added stack saving for most arguments svn path=/trunk/mono/; revision=2523 --- ChangeLog | 1 + ppc/tramp.c | 71 ++++++++++++++++++++++--------------------------------------- 2 files changed, 26 insertions(+), 46 deletions(-) diff --git a/ChangeLog b/ChangeLog index dee1900..3365335 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,7 @@ * ppc/tramp.c (emit_save_parameters): don't start saving 64bit values to even registers + added stack saving for most arguments Tue Feb 19 20:19:38 CET 2002 Paolo Molaro diff --git a/ppc/tramp.c b/ppc/tramp.c index 41e05f8..0b19a93 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -78,7 +78,7 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } else { if (*gr >= GENERAL_REGS - 1) { *stack_size += 8 + (*stack_size % 8); - *code_size += 16; /* 2x load from stack, save to stack */ + *code_size += 16; /* 2x load from stack, 2x save to stack */ } else { *code_size += 16; /* 2x load from stack */ } @@ -255,7 +255,7 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ } - ppc_stw (p, ppc_r4, 8, ppc_r31); /* preserve "retval", sp[+8] */ + ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ return p; } @@ -266,33 +266,22 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ gr ++; \ } else { \ - NOT_IMPLEMENTED("save on stack"); \ - } -#define SAVE_2_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lhz (p, ppc_r3 + gr, i*16, ARG_BASE); \ - gr ++; \ - } else { \ - NOT_IMPLEMENTED("save on stack"); \ - } -#define SAVE_1_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lbz (p, ppc_r3 + gr, i*16, ARG_BASE); \ - gr ++; \ - } else { \ - NOT_IMPLEMENTED("save on stack"); \ + ppc_lwz (p, ppc_r11, i*16, ARG_BASE); \ + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ + stack_par_pos += 4; \ } inline static guint8* emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) { MonoMethodSignature *sig; - guint i, fr, gr, act_strs; + guint i, fr, gr, act_strs, stack_par_pos; guint32 simpletype; fr = gr = 0; act_strs = 0; sig = method->signature; + stack_par_pos = 8; if (strings) { for (i = 0; i < sig->param_count; ++i) { @@ -303,7 +292,7 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_to_utf8 & 0xffff); ppc_mtlr (p, ppc_r0); ppc_blrl (p); - ppc_stw (p, ppc_r3, stack_size - 20 - act_strs, ppc_r31); + ppc_stw (p, ppc_r3, stack_size - 24 - act_strs, ppc_r31); act_strs += 4; } } @@ -326,13 +315,9 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - 
SAVE_1_IN_GENERIC_REGISTER; - break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: - SAVE_2_IN_GENERIC_REGISTER; - break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -364,7 +349,7 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str SAVE_4_IN_GENERIC_REGISTER; } else { if (gr < 8) { - ppc_lwz (p, ppc_r3 + gr, stack_size - 20 - act_strs, ppc_r31); + ppc_lwz (p, ppc_r3 + gr, stack_size - 24 - act_strs, ppc_r31); gr ++; act_strs += 4; } else @@ -433,7 +418,7 @@ mono_string_new_wrapper (const char *text) } static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint strings, gint runtime) +emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) { MonoMethodSignature *sig = method->signature; guint32 simpletype; @@ -444,8 +429,8 @@ emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint strings, gint r /* get return value */ if (sig->ret->byref) { - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ } else { simpletype = sig->ret->type; enum_retvalue: @@ -453,15 +438,9 @@ enum_retvalue: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stb (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_sth (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -470,8 +449,8 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ break; case MONO_TYPE_STRING: if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { @@ -481,23 +460,23 @@ enum_retvalue: ppc_blrl (p); } - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ break; case MONO_TYPE_R4: - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stfs (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stfs (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ break; case MONO_TYPE_R8: - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stfd (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stfd (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ break; case MONO_TYPE_I8: g_warning ("check endianess"); - ppc_lwz (p, ppc_r9, 8, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - ppc_stw (p, ppc_r4, 4, 
ppc_r9); /* save return value (r3) to "retval" */ + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + ppc_stw (p, ppc_r4, 4, ppc_r9); /* save return value (r3) to "retval" */ break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { @@ -529,7 +508,7 @@ emit_epilog (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gbo if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING && !((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)) { ppc_lis (p, ppc_r0, (guint32) g_free >> 16); - ppc_lwz (p, ppc_r3, stack_size - 20 - act_strs, ppc_r31); + ppc_lwz (p, ppc_r3, stack_size - 24 - act_strs, ppc_r31); ppc_ori (p, ppc_r0, ppc_r0, (guint32) g_free & 0xffff); ppc_mtlr (p, ppc_r0); ppc_blrl (p); @@ -568,7 +547,7 @@ mono_create_trampoline (MonoMethod *method, int runtime) p = code_buffer = alloc_code_memory (code_size); p = emit_prolog (p, method, stack_size, strings); p = emit_save_parameters (p, method, stack_size, strings, runtime); - p = emit_call_and_store_retval (p, method, strings, runtime); + p = emit_call_and_store_retval (p, method, stack_size, strings, runtime); p = emit_epilog (p, method, stack_size, strings, runtime); /* { -- cgit v1.1 From 6bb3f7ead4ab8d574273f5bdacf32b29809ace80 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Tue, 19 Feb 2002 20:57:29 +0000 Subject: ops, fix return value passing svn path=/trunk/mono/; revision=2526 --- ppc/tramp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ppc/tramp.c b/ppc/tramp.c index 0b19a93..e25db18 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -438,9 +438,15 @@ enum_retvalue: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_stb (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: + ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ + ppc_sth (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: -- cgit v1.1 From 0c4f3b00c8e831077c6ba1b28065e7be81bbff61 Mon Sep 17 00:00:00 2001 From: Jeffrey Stedfast Date: Fri, 22 Feb 2002 19:43:09 +0000 Subject: 2002-02-22 Jeffrey Stedfast * sparc/tramp.c (mono_create_trampoline): Much tinkering to get the opcodes more correct. Still needs a lot of work. svn path=/trunk/mono/; revision=2602 --- ChangeLog | 5 + sparc/tramp.c | 383 ++++++++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 295 insertions(+), 93 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3365335..81bf2e7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-02-22 Jeffrey Stedfast + + * sparc/tramp.c (mono_create_trampoline): Much tinkering to get + the opcodes more correct. Still needs a lot of work. + 2002-02-19 Radek Doulik * ppc/tramp.c (emit_save_parameters): don't start saving 64bit values to diff --git a/sparc/tramp.c b/sparc/tramp.c index ded60e6..8bc08ea 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -1,10 +1,11 @@ - +/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* * Create trampolines to invoke arbitrary functions. * * Copyright (C) Ximian Inc. 
* - * Author: Paolo Molaro (lupus@ximian.com) + * Authors: Paolo Molaro (lupus@ximian.com) + * Jeffrey Stedfast * */ @@ -14,11 +15,9 @@ #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" + -/* - * The resulting function takes the form: - * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); - */ #define FUNC_ADDR_POS sparc_i0 #define RETVAL_POS sparc_i1 #define THIS_POS sparc_i2 @@ -27,28 +26,113 @@ #define ARG_SIZE sizeof (stackval) -MonoPIFunc -mono_create_trampoline (MonoMethod *method, int runtime) +static void +fake_func (void (*callme)(gpointer, gpointer), stackval *retval, void *this_obj, stackval *arguments) { - MonoMethodSignature *sig; - guint32 *p, *code_buffer; - guint32 local_size = 0, stack_size = 0, code_size = 6; - guint32 arg_pos, simpletype; - int i, stringp, cur_out_reg; + //*(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); + //*(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); + + /* internal_from_handle() */ + /* return (gpointer)(*callme) (((MonoType *)arguments [0].data.p)->data.klass); */ + + /* InitializeArray() */ + return (*callme) (arguments [0].data.p, arguments [1].data.p); +} - sig = method->signature; +static const char * +mono_type (int type) +{ + switch (type) { + case MONO_TYPE_END: + return "MONO_TYPE_END"; + case MONO_TYPE_VOID: + return "MONO_TYPE_VOID"; + case MONO_TYPE_BOOLEAN: + return "MONO_TYPE_BOOLEAN"; + case MONO_TYPE_CHAR: + return "MONO_TYPE_CHAR"; + case MONO_TYPE_I1: + return "MONO_TYPE_I1"; + case MONO_TYPE_U1: + return "MONO_TYPE_U1"; + case MONO_TYPE_I2: + return "MONO_TYPE_I2"; + case MONO_TYPE_U2: + return "MONO_TYPE_U2"; + case MONO_TYPE_I4: + return "MONO_TYPE_I4"; + case MONO_TYPE_U4: + return "MONO_TYPE_U4"; + case MONO_TYPE_I8: + return "MONO_TYPE_I8"; + case MONO_TYPE_U8: + return "MONO_TYPE_U8"; + case MONO_TYPE_R4: + return "MONO_TYPE_R4"; + case MONO_TYPE_R8: + return "MONO_TYPE_R8"; + case MONO_TYPE_STRING: + return "MONO_TYPE_STRING"; + case MONO_TYPE_PTR: + return "MONO_TYPE_PTR"; + case MONO_TYPE_BYREF: + return "MONO_TYPE_BYREF"; + case MONO_TYPE_VALUETYPE: + return "MONO_TYPE_VALUETYPE"; + case MONO_TYPE_CLASS: + return "MONO_TYPE_CLASS"; + case MONO_TYPE_ARRAY: + return "MONO_TYPE_ARRAY"; + case MONO_TYPE_TYPEDBYREF: + return "MONO_TYPE_TYPEBYREF"; + case MONO_TYPE_I: + return "MONO_TYPE_I"; + case MONO_TYPE_U: + return "MONO_TYPE_U"; + case MONO_TYPE_FNPTR: + return "MONO_TYPE_FNPTR"; + case MONO_TYPE_OBJECT: + return "MONO_TYPE_OBJECT"; + case MONO_TYPE_SZARRAY: + return "MONO_TYPE_SZARRAY"; + case MONO_TYPE_CMOD_REQD: + return "MONO_TYPE_CMOD_REQD"; + case MONO_TYPE_CMOD_OPT: + return "MONO_TYPE_CMOD_OPT"; + case MONO_TYPE_INTERNAL: + return "MONO_TYPE_INTERNAL"; + case MONO_TYPE_MODIFIER: + return "MONO_TYPE_MODIFIER"; + case MONO_TYPE_SENTINEL: + return "MONO_TYPE_SENTINEL"; + case MONO_TYPE_PINNED: + return "MONO_TYPE_PINNED"; + } + return "??"; +} + +static void +calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, guint32 *code_size, int runtime) +{ + MonoMethodSignature *sig = method->signature; + guint32 local = 0, stack = 0, code = 6; + guint32 simpletype; + int i; + + /* function arguments */ if (sig->hasthis) - code_size ++; + code++; - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) { - stack_size += sizeof (gpointer); - code_size += i < 6 ? 
1 : 3; + for (i = 0; i < sig->param_count; i++) { + if (sig->params[i]->byref) { + stack += sizeof (gpointer); + code += i < 6 ? 1 : 3; continue; } - simpletype = sig->params [i]->type; -enum_calc_size: + + simpletype = sig->params[i]->type; + enum_calc_size: switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: @@ -65,47 +149,143 @@ enum_calc_size: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: - stack_size += 4; - code_size += i < 6 ? 1 : 3; + stack += 4; + code += i < 6 ? 1 : 3; break; case MONO_TYPE_VALUETYPE: - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; + if (sig->params[i]->data.klass->enumtype) { + simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) - g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); - stack_size += 4; - code_size += i < 6 ? 1 : 3; + if (mono_class_value_size (sig->params[i]->data.klass, NULL) != 4) + g_error ("can only marshal enums, not generic structures (size: %d)", + mono_class_value_size (sig->params[i]->data.klass, NULL)); + stack += 4; + code += i < 6 ? 1 : 3; break; case MONO_TYPE_STRING: - stack_size += 4; - code_size += 5; - local_size++; + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + stack += 4; + code += i < 6 ? 1 : 3; + break; + } + + stack += 4; + code += 5; + local++; break; case MONO_TYPE_I8: - stack_size += 8; - code_size += i < 6 ? 2 : 3; + stack += 8; + code += i < 6 ? 2 : 3; break; case MONO_TYPE_R8: - stack_size += 8; - code_size += i < 6 ? 2 : 3; + stack += 8; + code += i < 6 ? 2 : 3; break; default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); + g_error ("Can't trampoline 0x%x", sig->params[i]->type); } } - /* - * FIXME: take into account large return values. - */ + + /* function return value */ + if (sig->ret->byref) { + code += 2; + } else { + simpletype = sig->ret->type; + enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + code += 2; + break; +#if 0 + case MONO_TYPE_STRING: + code += 2; + if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { + code += 4; + } + break; +#endif + case MONO_TYPE_I8: + code += 3; + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + code += 2; + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + +#define STACKALIGN(x) (((x) + 15) & (~15)) +#define MINFRAME ((16 + 1 + 6) * 4) /* minimum size stack frame, in bytes: + * 16 for registers, 1 for "hidden param", + * and 6 in which a callee can store it's + * arguments. + */ + + stack += MINFRAME + (local * 4); + + fprintf (stderr, "\tstack size: %d (%d)\n\tcode size: %d\n", STACKALIGN(stack), stack, code); + + *local_size = local; + *stack_size = STACKALIGN(stack); + *code_size = code; +} + +static MonoString * +mono_string_new_wrapper (const char *text) +{ + return text ? 
mono_string_new (mono_domain_get (), text) : NULL; +} +MonoPIFunc +mono_create_trampoline (MonoMethod *method, int runtime) +{ + MonoMethodSignature *sig; + guint32 *p, *code_buffer; + guint32 local_size, stack_size, code_size; + guint32 arg_pos, simpletype; + int i, stringp, cur_out_reg; + + sig = method->signature; + + fprintf (stderr, "\nPInvoke [start emiting] %s\n", method->name); + calculate_sizes (method, &local_size, &stack_size, &code_size, runtime); + code_buffer = p = alloca (code_size * 4); cur_out_reg = sparc_o0; - - /* - * Standard function prolog. - */ - sparc_save_imm (p, sparc_sp, -112-stack_size, sparc_sp); + + /* Standard function prolog. */ + sparc_save_imm (p, sparc_sp, -stack_size, sparc_sp); +#if 0 + /* gcc seems to want to store %i0 through %i3 for some reason */ + sparc_st_imm (p, sparc_i0, sparc_fp, 68); + sparc_st_imm (p, sparc_i1, sparc_fp, 72); + sparc_st_imm (p, sparc_i2, sparc_fp, 76); + sparc_st_imm (p, sparc_i3, sparc_fp, 80); +#endif + /* * We store some local vars here to handle string pointers. * and align to 16 byte boundary... @@ -120,29 +300,31 @@ enum_calc_size: if (stack_size) x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); #endif - + /* * %i3 has the pointer to the args. */ - + if (sig->hasthis) { sparc_mov_reg_reg (p, sparc_i2, cur_out_reg); - ++cur_out_reg; + cur_out_reg++; } - - /* - * Push arguments in reverse order. - */ + + /* Push arguments in reverse order. */ stringp = 0; - for (i = 0; i < sig->param_count; ++i) { + for (i = 0; i < sig->param_count; i++) { arg_pos = ARG_SIZE * i; - if (sig->params [i]->byref) { + + if (sig->params[i]->byref) { + fprintf (stderr, "\tpushing params[%d] (byref): type=%s;\n", i, mono_type (sig->params[i]->type)); sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - ++cur_out_reg; + cur_out_reg++; continue; } - simpletype = sig->params [i]->type; + + simpletype = sig->params[i]->type; enum_marshal: + fprintf (stderr, "\tpushing params[%d]: type=%s;\n", i, mono_type (simpletype)); switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: @@ -155,42 +337,36 @@ enum_marshal: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: + case MONO_TYPE_R4: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: - case MONO_TYPE_R4: sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - ++cur_out_reg; + cur_out_reg++; break; case MONO_TYPE_VALUETYPE: - if (!sig->params [i]->data.klass->enumtype) { - /* it's a structure that fits in 4 bytes, need to push the value pointed to */ - /*x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_push_regp (p, X86_EAX);*/ - g_assert (0); - } else { + if (sig->params[i]->data.klass->enumtype) { /* it's an enum value */ - simpletype = sig->params [i]->data.klass->enum_basetype->type; + simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_marshal; + } else { + /*sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg);*/ + sparc_ld_imm (p, sparc_i3, arg_pos, sparc_l0); + sparc_ld (p, sparc_l0, 0, cur_out_reg); + cur_out_reg++; } break; - case MONO_TYPE_R8: - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - ++cur_out_reg; - sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg); - ++cur_out_reg; - break; -#if 0 case MONO_TYPE_STRING: - /* - * If it is an internalcall we assume it's the object we want. - * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. 
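On SPARC the string case below is still a stub: internal calls receive the MonoString object itself, and anything else prints a warning and exits. The policy the x86 trampoline implements, and which this port will eventually need, is roughly the following C sketch; marshal_string_arg is an illustrative name, not a function in this file.

	/* Sketch (not part of this file) of the string-argument policy the
	 * x86 trampoline implements: internal calls take the MonoString*
	 * unchanged, P/Invoke targets get a UTF-8 copy that the epilogue
	 * later releases with g_free(). */
	static gpointer
	marshal_string_arg (MonoMethod *method, MonoString *str, int runtime)
	{
		if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)
			return str;                   /* pass the object through */
		return mono_string_to_utf8 (str); /* freed later with g_free */
	}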
- */ if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - x86_push_membase (p, X86_EDX, arg_pos); + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + cur_out_reg++; break; } - /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ + +#if 0 + sparc_sethi (p, mono_string_to_utf8, sparc_l0); + sparc_or_imm (p, 0, sparc_l0, mono_string_to_utf8, sparc_l1); + x86_push_membase (p, X86_EDX, arg_pos); x86_mov_reg_imm (p, X86_EDX, mono_string_to_utf8); x86_call_reg (p, X86_EDX); @@ -206,25 +382,31 @@ enum_marshal: */ if (i > 1) x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); - break; #endif + fprintf (stderr, "MONO_TYPE_STRING not yet fully supported.\n"); + exit (1); + break; case MONO_TYPE_I8: sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - ++cur_out_reg; - sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg); - ++cur_out_reg; + cur_out_reg++; + sparc_ld_imm (p, sparc_i3, arg_pos + 4, cur_out_reg); + cur_out_reg++; + break; + case MONO_TYPE_R8: + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); + cur_out_reg++; + sparc_ld_imm (p, sparc_i3, arg_pos + 4, cur_out_reg); + cur_out_reg++; break; default: g_error ("Can't trampoline 0x%x", sig->params [i]->type); } } - - /* - * Insert call to function - */ + + /* call the function */ sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite); sparc_nop (p); - + /* * Handle retval. * Small integer and pointer values are in EAX. @@ -268,7 +450,7 @@ enum_retvalue: x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; } - + /* If the argument is non-null, then convert the value back */ x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); x86_branch8 (p, X86_CC_EQ, 11, FALSE); @@ -276,7 +458,7 @@ enum_retvalue: x86_mov_reg_imm (p, X86_EDX, mono_string_new); x86_call_reg (p, X86_EDX); x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; @@ -305,10 +487,9 @@ enum_retvalue: } } #endif - /* - * free the allocated strings. - */ + #if 0 + /* free the allocated strings... */ if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { if (local_size) x86_mov_reg_imm (p, X86_EDX, g_free); @@ -324,7 +505,23 @@ enum_retvalue: */ sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); - + + { + unsigned char *inptr, *inend; + + inptr = (unsigned char *) code_buffer; + inend = (unsigned char *) p; + + printf (".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); + while (inptr < inend) { + printf (".byte 0x%x\n", *inptr); + inptr++; + } + fflush (stdout); + } + + fprintf (stderr, "PInvoke [finish emitting] %s\n", method->name); + /* FIXME: need to flush */ return g_memdup (code_buffer, 4 * (p - code_buffer)); } -- cgit v1.1 From 56dde5e20e11f2d9d2a3522923a5a4729bed469f Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sun, 24 Feb 2002 01:40:17 +0000 Subject: 2002-02-24 Radek Doulik * ppc/tramp.c (mono_create_method_pointer): basic delegates implementation; it works for simple delegates now, and I am already pretty close to having it working for all delegates, but I am going to sleep and will finish it tomorrow.
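The delegate thunk this commit introduces is easier to follow against a C rendering of the control flow the emitted PowerPC code implements. The sketch below is explanatory only: it assumes the MonoInvocation fields addressed via G_STRUCT_OFFSET in the diff and the interpreter entry point ves_exec_method, and method_pointer_equivalent is an illustrative name, not code from the tree.

	/* C-level equivalent (sketch only) of the stub emitted by
	 * mono_create_method_pointer() in the diff below. */
	static void
	method_pointer_equivalent (MonoMethod *method, gpointer this_obj,
	                           stackval *args, stackval *retval)
	{
		MonoInvocation inv;

		inv.ex = NULL;          /* the stub zeroes these four fields */
		inv.ex_handler = NULL;
		inv.child = NULL;
		inv.parent = NULL;
		inv.method = method;    /* constant synthesized with lis/ori */
		inv.obj = this_obj;     /* incoming r3, when sig->hasthis */
		inv.stack_args = args;  /* stackvals placed after the struct */
		inv.retval = retval;    /* reuses the stackval area */

		ves_exec_method (&inv); /* reached via mtlr + blrl */
		/* still TODO in the diff: copy *retval back into r3/r4 */
	}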
svn path=/trunk/mono/; revision=2611 --- ChangeLog | 7 ++++ ppc/ppc-codegen.h | 1 + ppc/tramp.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 127 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index 81bf2e7..e503aa9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,10 @@ +2002-02-24 Radek Doulik + + * ppc/tramp.c (mono_create_method_pointer): basic delegates + implementation; it works for simple delegates now, and I am already + pretty close to having it working for all delegates, but I am + going to sleep and will finish it tomorrow. + 2002-02-22 Jeffrey Stedfast * sparc/tramp.c (mono_create_trampoline): Much tinkering to get diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 9c09b66..c3f79b2 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -102,6 +102,7 @@ typedef enum { #define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) #define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S) +#define ppc_b(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2)) #define ppc_blrl(c) ppc_emit32 (c, 0x4e800021) #define ppc_blr(c) ppc_emit32 (c, 0x4e800020) diff --git a/ppc/tramp.c b/ppc/tramp.c index e25db18..634d581 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -580,7 +580,7 @@ mono_create_trampoline (MonoMethod *method, int runtime) } -#define MINV_POS (- sizeof (MonoInvocation)) +#define MINV_POS 8 /* MonoInvocation structure offset on stack */ #define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) #define OBJ_POS 8 #define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) @@ -598,7 +598,120 @@ mono_create_trampoline (MonoMethod *method, int runtime) void * mono_create_method_pointer (MonoMethod *method) { - return NULL; + MonoMethodSignature *sig; + guint8 *p, *code_buffer; + guint code_size, stack_size, stackval_arg_pos; + + code_size = 512; + stack_size = 512; + + sig = method->signature; + + p = code_buffer = g_malloc (code_size); + + printf ("\nDelegate [start emitting] %s\n", method->name); + + /* jump after the header, which consists of "Mono" + method ptr */ + ppc_b (p, 3); + *p = 'M'; p ++; + *p = 'o'; p ++; + *p = 'n'; p ++; + *p = 'o'; p ++; + *(void **) p = method; p += 4; + + /* prolog */ + ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ + ppc_mflr (p, ppc_r0); /* r0 <--- LR */ + ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ + ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ + ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ + + /* let's fill MonoInvocation */ + /* first zero some fields */ + ppc_li (p, ppc_r0, 0); + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), ppc_r31); + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), ppc_r31); + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), ppc_r31); + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), ppc_r31); + + /* set method pointer */ + ppc_lis (p, ppc_r0, (guint32) method >> 16); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) method & 0xffff); + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), ppc_r31); + + if (sig->hasthis) { + ppc_stw (p, ppc_r3, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), ppc_r31); + } + + /* set MonoInvocation::stack_args */ + stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); + ppc_addi (p, ppc_r0, ppc_r31, stackval_arg_pos); + ppc_stw (p, ppc_r0, (MINV_POS +
G_STRUCT_OFFSET (MonoInvocation, stack_args)), ppc_r31); + + /* add stackval arguments */ + /* for (i = 0; i < sig->param_count; ++i) { + + + + ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) stackval_from_data & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + + x86_mov_reg_imm (p, X86_ECX, stackval_from_data); + x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); + x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + x86_push_reg (p, X86_EDX); + x86_push_reg (p, X86_EAX); + x86_push_imm (p, sig->params [i]); + x86_call_reg (p, X86_ECX); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 12); + stackval_pos += sizeof (stackval); + arg_pos += 4; + if (!sig->params [i]->byref) { + switch (sig->params [i]->type) { + case MONO_TYPE_I8: + case MONO_TYPE_R8: + arg_pos += 4; + break; + case MONO_TYPE_VALUETYPE: + g_assert_not_reached (); Not implemented yet. + default: + break; + } + } + } */ + + /* return value storage */ + if (sig->param_count) { + ppc_addi (p, ppc_r0, ppc_r31, stackval_arg_pos); + } + ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)), ppc_r31); + + /* call ves_exec_method */ + ppc_lis (p, ppc_r0, (guint32) ves_exec_method >> 16); + ppc_addi (p, ppc_r3, ppc_r31, MINV_POS); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) ves_exec_method & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + + /* move retval from stackval to proper place (r3/r4/...) */ + /* TODO */ + + /* epilog */ + ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ + ppc_lwz (p, ppc_r0, 4, ppc_r11); /* r0 <--- r11[4] load return address */ + ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */ + ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */ + ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ + ppc_blr (p); /* return */ + + printf ("emitted code size: %d\n", p - code_buffer); + flush_icache (code_buffer, p - code_buffer); + + printf ("Delegate [end emitting]\n"); + + return (MonoPIFunc) code_buffer; } @@ -610,6 +723,8 @@ mono_create_method_pointer (MonoMethod *method) MonoMethod* mono_method_pointer_get (void *code) { - return NULL; + unsigned char *c = code; + if (c [4] != 'M' || c [5] != 'o' || c [6] != 'n' || c [7] != 'o') + return NULL; + return *(MonoMethod**)(c + 8); } - -- cgit v1.1 From 2217d1a7da2572afd033b958454b9662c42022b9 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Sun, 24 Feb 2002 17:44:55 +0000 Subject: * ARM support sources, initial check-in; svn path=/trunk/mono/; revision=2615 --- arm/.cvsignore | 13 + arm/Makefile.am | 24 ++ arm/arm-codegen.c | 193 +++++++++++++ arm/arm-codegen.h | 748 ++++++++++++++++++++++++++++++++++++++++++++++++++ arm/arm-dis.c | 494 +++++++++++++++++++++++++++++++++ arm/arm-dis.h | 40 +++ arm/cmp_macros.th | 11 + arm/dpi_macros.th | 44 +++ arm/dpiops.sh | 32 +++ arm/mov_macros.th | 39 +++ arm/tramp.c | 809 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 11 files changed, 2447 insertions(+) create mode 100644 arm/.cvsignore create mode 100644 arm/Makefile.am create mode 100644 arm/arm-codegen.c create mode 100644 arm/arm-codegen.h create mode 100644 arm/arm-dis.c create mode 100644 arm/arm-dis.h create mode 100644 arm/cmp_macros.th create mode 100644 arm/dpi_macros.th create mode 100755 arm/dpiops.sh create mode 100644 arm/mov_macros.th create mode 100644 arm/tramp.c diff --git a/arm/.cvsignore b/arm/.cvsignore new file mode 100644 index 0000000..3a221ac --- /dev/null +++ b/arm/.cvsignore @@ -0,0 +1,13 @@ +Makefile +Makefile.in
+.deps +.libs +*.o +*.la +*.lo +*.lib +*.obj +*.exe +*.dll +arm_dpimacros.h +fixeol.sh \ No newline at end of file diff --git a/arm/Makefile.am b/arm/Makefile.am new file mode 100644 index 0000000..851c436 --- /dev/null +++ b/arm/Makefile.am @@ -0,0 +1,24 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-arm.la + +BUILT_SOURCES = arm_dpimacros.h + +SCRIPT_SOURCES = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th + +libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ + tramp.c \ + arm-codegen.c \ + arm-codegen.h \ + arm-dis.c \ + arm-dis.h \ + # + +arm_dpimacros.h: $(SCRIPT_SOURCES) + bash ./dpiops.sh + +CLEANFILES = $(BUILT_SOURCES) + +EXTRA_DIST = $(SCRIPT_SOURCES) + diff --git a/arm/arm-codegen.c b/arm/arm-codegen.c new file mode 100644 index 0000000..88d572a --- /dev/null +++ b/arm/arm-codegen.c @@ -0,0 +1,193 @@ +/* + * arm-codegen.c + * Copyright (c) 2002 Sergey Chaban + */ + +#include "arm-codegen.h" + + +arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) { + ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP); + + /* save args */ + ARM_PUSH(p, (1 << ARMREG_A1) + | (1 << ARMREG_A2) + | (1 << ARMREG_A3) + | (1 << ARMREG_A4)); + + ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR)); + + if (local_size != 0) { + if ((local_size & (~0xFF)) == 0) { + ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size); + } else { + /* TODO: optimize */ + p = arm_mov_reg_imm32(p, ARMREG_IP, local_size); + ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP); + ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t)); + ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP); + } + } + + return p; +} + +arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) { + if (local_size != 0) { + if ((local_size & (~0xFF)) == 0) { + ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size); + } else { + /* TODO: optimize */ + p = arm_mov_reg_imm32(p, ARMREG_IP, local_size); + ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP); + } + } + + ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF)); + + return p; +} + + +/* do not push A1-A4 */ +arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) { + ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP); + /* push_regs upto R10 will be saved */ + ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF)); + + if (local_size != 0) { + if ((local_size & (~0xFF)) == 0) { + ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size); + } else { + /* TODO: optimize */ + p = arm_mov_reg_imm32(p, ARMREG_IP, local_size); + ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP); + /* restore IP from stack */ + ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t)); + ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP); + } + } + + return p; +} + +/* Bit scan forward. 
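+ * Returns the 1-based index of the least significant set bit, or 0 when val is 0.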
*/ +int arm_bsf(armword_t val) { + int i; + armword_t mask; + + if (val == 0) return 0; + for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1); + + return i; +} + + +int arm_is_power_of_2(armword_t val) { + return ((val & (val-1)) == 0); +} + + +/* + * returns: + * 1 - unable to represent + * positive even number - MOV-representable + * negative even number - MVN-representable + */ +int calc_arm_mov_const_shift(armword_t val) { + armword_t mask; + int res = 1, shift; + + for (shift=0; shift < 32; shift+=2) { + mask = ARM_SCALE(0xFF, shift); + if ((val & (~mask)) == 0) { + res = shift; + break; + } + if (((~val) & (~mask)) == 0) { + res = -shift - 2; + break; + } + } + + return res; +} + + +int is_arm_const(armword_t val) { + int res; + res = arm_is_power_of_2(val); + if (!res) { + res = calc_arm_mov_const_shift(val); + res = !(res < 0 || res == 1); + } + return res; +} + + +int arm_const_steps(armword_t val) { + int shift, steps = 0; + + while (val != 0) { + shift = (arm_bsf(val) - 1) & (~1); + val &= ~(0xFF << shift); + ++steps; + } + return steps; +} + + +/* + * ARM cannot load arbitrary 32-bit constants directly into registers; + * widely used work-around for this is to store constants into a + * PC-addressable pool and use LDR instruction with PC-relative address + * to load constant into register. Easiest way to implement this is to + * embed constant inside a function with unconditional branch around it. + * The above method is not used at the moment. + * This routine always emits sequence of instructions to generate + * requested constant. In the worst case it takes 4 instructions to + * synthesize a constant - 1 MOV and 3 subsequent ORRs. + */ +arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) { + int mov_op; + int step_op; + int snip; + int shift = calc_arm_mov_const_shift(imm32); + + if ((shift & 0x80000001) != 1) { + if (shift >= 0) { + ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift >> 1, cond); + } else { + ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2) >> 1, cond); + } + } else { + mov_op = ARMOP_MOV; + step_op = ARMOP_ORR; + + if (arm_const_steps(imm32) > arm_const_steps(~imm32)) { + mov_op = ARMOP_MVN; + step_op = ARMOP_SUB; + imm32 = ~imm32; + } + + shift = (arm_bsf(imm32) - 1) & (~1); + snip = imm32 & (0xFF << shift); + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND(snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond)); + + while ((imm32 ^= snip) != 0) { + shift = (arm_bsf(imm32) - 1) & (~1); + snip = imm32 & (0xFF << shift); + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND(snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond)); + } + } + + return p; +} + + +arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) { + return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL); +} + + + diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h new file mode 100644 index 0000000..3d6c798 --- /dev/null +++ b/arm/arm-codegen.h @@ -0,0 +1,748 @@ +/* + * arm-codegen.h + * Copyright (c) 2002 Sergey Chaban + */ + + +#ifndef ARM_H +#define ARM_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef unsigned int arminstr_t; +typedef unsigned int armword_t; + +/* Helper functions */ +arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size); +arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs); +arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs); +int arm_is_power_of_2(armword_t 
val); +int calc_arm_mov_const_shift(armword_t val); +int is_arm_const(armword_t val); +int arm_bsf(armword_t val); +arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond); +arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32); + + + +#if defined(_MSC_VER) || defined(__CC_NORCROFT) + void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;} +# define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i)) +#else +# define ARM_EMIT(p, i) *(arminstr_t*)p = (arminstr_t)i; ((arminstr_t*)p)++ +#endif + +/* even_scale = rot << 1 */ +#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - even_scale)) ) + + + +typedef enum { + ARMREG_R0 = 0, + ARMREG_R1, + ARMREG_R2, + ARMREG_R3, + ARMREG_R4, + ARMREG_R5, + ARMREG_R6, + ARMREG_R7, + ARMREG_R8, + ARMREG_R9, + ARMREG_R10, + ARMREG_R11, + ARMREG_R12, + ARMREG_R13, + ARMREG_R14, + ARMREG_R15, + + + /* aliases */ + /* args */ + ARMREG_A1 = ARMREG_R0, + ARMREG_A2 = ARMREG_R1, + ARMREG_A3 = ARMREG_R2, + ARMREG_A4 = ARMREG_R3, + + /* local vars */ + ARMREG_V1 = ARMREG_R4, + ARMREG_V2 = ARMREG_R5, + ARMREG_V3 = ARMREG_R6, + ARMREG_V4 = ARMREG_R7, + ARMREG_V5 = ARMREG_R8, + ARMREG_V6 = ARMREG_R9, + ARMREG_V7 = ARMREG_R10, + + ARMREG_FP = ARMREG_R11, + ARMREG_IP = ARMREG_R12, + ARMREG_SP = ARMREG_R13, + ARMREG_LR = ARMREG_R14, + ARMREG_PC = ARMREG_R15, + + /* co-processor */ + ARMREG_CR0 = 0, + ARMREG_CR1, + ARMREG_CR2, + ARMREG_CR3, + ARMREG_CR4, + ARMREG_CR5, + ARMREG_CR6, + ARMREG_CR7, + ARMREG_CR8, + ARMREG_CR9, + ARMREG_CR10, + ARMREG_CR11, + ARMREG_CR12, + ARMREG_CR13, + ARMREG_CR14, + ARMREG_CR15, + + ARMREG_MAX = ARMREG_R15 +} ARMReg; + +/* number of argument registers */ +#define ARM_NUM_ARG_REGS 4 + +/* bitvector for all argument regs (A1-A4) */ +#define ARM_ALL_ARG_REGS \ + (1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4) + + +typedef enum { + ARMCOND_EQ = 0x0, /* Equal */ + ARMCOND_NE = 0x1, /* Not equal, or unordered */ + ARMCOND_CS = 0x2, /* Carry set */ + ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same */ + ARMCOND_CC = 0x3, /* Carry clear */ + ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */ + ARMCOND_MI = 0x4, /* Negative */ + ARMCOND_PL = 0x5, /* Positive or zero */ + ARMCOND_VS = 0x6, /* Overflow */ + ARMCOND_VC = 0x7, /* No overflow */ + ARMCOND_HI = 0x8, /* Unsigned higher */ + ARMCOND_LS = 0x9, /* Unsigned lower or same */ + ARMCOND_GE = 0xA, /* Signed greater than or equal */ + ARMCOND_LT = 0xB, /* Signed less than */ + ARMCOND_GT = 0xC, /* Signed greater than */ + ARMCOND_LE = 0xD, /* Signed less than or equal */ + ARMCOND_AL = 0xE, /* Always */ + ARMCOND_NV = 0xF, /* Never */ + + ARMCOND_SHIFT = 28 +} ARMCond; + +#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT) + +#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT) + + + +typedef enum { + ARMSHIFT_LSL = 0, + ARMSHIFT_LSR = 1, + ARMSHIFT_ASR = 2, + ARMSHIFT_ROR = 3, + + ARMSHIFT_ASL = ARMSHIFT_LSL + /* rrx = (ror, 1) */ +} ARMShiftType; + + +typedef struct { + armword_t PSR_c : 8; + armword_t PSR_x : 8; + armword_t PSR_s : 8; + armword_t PSR_f : 8; +} ARMPSR; + +typedef enum { + ARMOP_AND = 0x0, + ARMOP_EOR = 0x1, + ARMOP_SUB = 0x2, + ARMOP_RSB = 0x3, + ARMOP_ADD = 0x4, + ARMOP_ADC = 0x5, + ARMOP_SBC = 0x6, + ARMOP_RSC = 0x7, + ARMOP_TST = 0x8, + ARMOP_TEQ = 0x9, + ARMOP_CMP = 0xa, + ARMOP_CMN = 0xb, + ARMOP_ORR = 0xc, + ARMOP_MOV = 0xd, + ARMOP_BIC = 0xe, + ARMOP_MVN = 0xf, + + + /* not really opcodes */ + + ARMOP_STR = 0x0, + ARMOP_LDR = 0x1, + + /* 
ARM2+ */ + ARMOP_MUL = 0x0, /* Rd := Rm*Rs */ + ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */ + + /* ARM3M+ */ + ARMOP_UMUL = 0x4, + ARMOP_UMLAL = 0x5, + ARMOP_SMULL = 0x6, + ARMOP_SMLAL = 0x7 +} ARMOpcode; + + +/* Generic form - all ARM instructions are conditional. */ +typedef struct { + arminstr_t icode : 28; + arminstr_t cond : 4; +} ARMInstrGeneric; + + + +/* Branch or Branch with Link instructions. */ +typedef struct { + arminstr_t offset : 24; + arminstr_t link : 1; + arminstr_t tag : 3; /* 1 0 1 */ + arminstr_t cond : 4; +} ARMInstrBR; + +#define ARM_BR_ID 5 +#define ARM_BR_MASK 7 << 25 +#define ARM_BR_TAG ARM_BR_ID << 25 + +#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT)) + +/* branch */ +#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond)) +#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs)) +/* branch with link */ +#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond)) +#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs)) + + + +/* Data Processing Instructions - there are 3 types. */ + +typedef struct { + arminstr_t imm : 8; + arminstr_t rot : 4; +} ARMDPI_op2_imm; + +typedef struct { + arminstr_t rm : 4; + arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */ + arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */ +} ARMDPI_op2_reg_shift; + + +/* op2 is reg shift by imm */ +typedef union { + ARMDPI_op2_reg_shift r2; + struct { + arminstr_t _dummy_r2 : 7; + arminstr_t shift : 5; + } imm; +} ARMDPI_op2_reg_imm; + +/* op2 is reg shift by reg */ +typedef union { + ARMDPI_op2_reg_shift r2; + struct { + arminstr_t _dummy_r2 : 7; + arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */ + arminstr_t rs : 4; + } reg; +} ARMDPI_op2_reg_reg; + +/* Data processing instrs */ +typedef union { + ARMDPI_op2_imm op2_imm; + + ARMDPI_op2_reg_shift op2_reg; + ARMDPI_op2_reg_imm op2_reg_imm; + ARMDPI_op2_reg_reg op2_reg_reg; + + struct { + arminstr_t op2 : 12; /* raw operand 2 */ + arminstr_t rd : 4; /* destination reg */ + arminstr_t rn : 4; /* first operand reg */ + arminstr_t s : 1; /* S-bit controls PSR update */ + arminstr_t opcode : 4; /* arithmetic/logic operation */ + arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */ + arminstr_t tag : 2; /* 0 0 */ + arminstr_t cond : 4; + } all; +} ARMInstrDPI; + +#define ARM_DPI_ID 0 +#define ARM_DPI_MASK 3 << 26 +#define ARM_DPI_TAG ARM_DPI_ID << 26 + +#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \ + ((imm8) & 0xFF) | \ + (((rot) & 0xF) << 8) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((s) << 20) | \ + ((op) << 21) | \ + (1 << 25) | \ + (ARM_DPI_TAG) | \ + ARM_DEF_COND(cond) + + +#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \ + ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL) + + +#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond)) +#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond)) + + + +#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \ + (rm) | \ + ((shift_type & 3) << 5) | \ + (((imm_shift) & 0x1F) << 7) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((s) << 20) | \ + ((op) << 21) | \ + (ARM_DPI_TAG) | \ + ARM_DEF_COND(cond) + +#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \ + ARM_EMIT(p, 
ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond)) + +#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond)) + +#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond)) + +#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond)) + + + + + +/* Multiple register transfer. */ +typedef struct { + arminstr_t reg_list : 16; /* bitfield */ + arminstr_t rn : 4; /* base reg */ + arminstr_t ls : 1; /* load(1)/store(0) */ + arminstr_t wb : 1; /* write-back "!" */ + arminstr_t s : 1; /* restore PSR, force user bit */ + arminstr_t u : 1; /* up/down */ + arminstr_t p : 1; /* pre(1)/post(0) index */ + arminstr_t tag : 3; /* 1 0 0 */ + arminstr_t cond : 4; +} ARMInstrMRT; + +#define ARM_MRT_ID 4 +#define ARM_MRT_MASK 7 << 25 +#define ARM_MRT_TAG ARM_MRT_ID << 25 + +#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \ + (regs) | \ + (rn << 16) | \ + (l << 20) | \ + (w << 21) | \ + (s << 22) | \ + (u << 23) | \ + (p << 24) | \ + (ARM_MRT_TAG) | \ + ARM_DEF_COND(cond) + + + +/* stmdb sp!, {regs} */ +#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL)) + +/* ldmia sp!, {regs} */ +#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL)) + +/* ldmia sp, {regs} ; (no write-back) */ +#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL)) + + + +/* Multiply instructions */ +typedef struct { + arminstr_t rm : 4; + arminstr_t tag2 : 4; /* 9 */ + arminstr_t rs : 4; + arminstr_t rn : 4; + arminstr_t rd : 4; + arminstr_t s : 1; + arminstr_t opcode : 3; + arminstr_t tag : 4; + arminstr_t cond : 4; +} ARMInstrMul; + +#define ARM_MUL_ID 0 +#define ARM_MUL_ID2 9 +#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4)) +#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4)) + + +/* Word/byte transfer */ +typedef union { + ARMDPI_op2_reg_imm op2_reg_imm; + struct { + arminstr_t op2_imm : 12; + arminstr_t rd : 4; + arminstr_t rn : 4; + arminstr_t ls : 1; + arminstr_t wb : 1; + arminstr_t b : 1; + arminstr_t u : 1; + arminstr_t p : 1; /* post-index(0) / pre-index(1) */ + arminstr_t type : 1; /* imm(0) / register(1) */ + arminstr_t tag : 2; /* 0 1 */ + arminstr_t cond : 4; + } all; +} ARMInstrWXfer; + +#define ARM_WXFER_ID 1 +#define ARM_WXFER_MASK 3 << 26 +#define ARM_WXFER_TAG ARM_WXFER_ID << 26 + + +#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \ + ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((ls) << 20) | \ + ((wb) << 21) | \ + ((b) << 22) | \ + (((int)(imm12) >= 0) << 23) | \ + ((p) << 24) | \ + ARM_WXFER_TAG | \ + ARM_DEF_COND(cond) + +#define ARM_WXFER_MAX_OFFS 0xFFF + +/* this macro checks imm12 bounds */ +#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \ + do { \ + int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \ + ? -ARM_WXFER_MAX_OFFS \ + : (int)(imm12) > ARM_WXFER_MAX_OFFS \ + ? 
ARM_WXFER_MAX_OFFS \ + : (int)(imm12); \ + ARM_EMIT((ptr), \ + ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \ + } while (0) + + +/* LDRx */ +/* immediate offset, post-index */ +#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond)) + +#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL) + +#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond)) + +#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL) + +/* immediate offset, pre-index */ +#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond)) + +#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + +#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond)) + +#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + +/* STRx */ +/* immediate offset, post-index */ +#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond)) + +#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL) + +#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond)) + +#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL) + +/* immediate offset, pre-index */ +#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond) +/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */ + +#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + +#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond)) + +#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + + + +#define ARM_DEF_WXFER_REG_REG(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \ + (rm) | \ + ((shift_type) << 5) | \ + ((shift) << 7) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((ls) << 20) | \ + ((wb) << 21) | \ + ((b) << 22) | \ + ((p) << 24) | \ + (1 << 25) | \ + ARM_WXFER_TAG | \ + ARM_DEF_COND(cond) + + +#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_REG_REG(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond)) +#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \ + ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL) +#define ARM_LDR_REG_REG(p, rd, rn, rm) \ + ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0) + + + +/* Half-word or byte (signed) transfer. 
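+ * Unlike word transfers, the 8-bit immediate offset is split in two: the high nibble lives in imm_hi, the low nibble in the rm field (imm_lo).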
*/ +typedef struct { + arminstr_t rm : 4; /* imm_lo */ + arminstr_t tag3 : 1; /* 1 */ + arminstr_t h : 1; /* half-word or byte */ + arminstr_t s : 1; /* sign-extend or zero-extend */ + arminstr_t tag2 : 1; /* 1 */ + arminstr_t imm_hi : 4; + arminstr_t rd : 4; + arminstr_t rn : 4; + arminstr_t ls : 1; + arminstr_t wb : 1; + arminstr_t type : 1; /* imm(1) / reg(0) */ + arminstr_t u : 1; /* +- */ + arminstr_t p : 1; /* pre/post-index */ + arminstr_t tag : 3; + arminstr_t cond : 4; +} ARMInstrHXfer; + +#define ARM_HXFER_ID 0 +#define ARM_HXFER_ID2 1 +#define ARM_HXFER_ID3 1 +#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4)) +#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4)) + +#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \ + ((imm) & 0xF) | \ + ((h) << 5) | \ + ((s) << 6) | \ + (((imm) << 4) & (0xF << 8)) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((ls) << 20) | \ + ((wb) << 21) | \ + (1 << 22) | \ + (((int)(imm) >= 0) << 23) | \ + ((p) << 24) | \ + ARM_HXFER_TAG | \ + ARM_DEF_COND(cond) + +#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRH_IMM(p, rd, rn, imm) \ + ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL) +#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRSH_IMM(p, rd, rn, imm) \ + ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL) +#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRSB_IMM(p, rd, rn, imm) \ + ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + + +#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond)) +#define ARM_STRH_IMM(p, rd, rn, imm) \ + ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL) + + + +/* Swap */ +typedef struct { + arminstr_t rm : 4; + arminstr_t tag3 : 8; /* 0x9 */ + arminstr_t rd : 4; + arminstr_t rn : 4; + arminstr_t tag2 : 2; + arminstr_t b : 1; + arminstr_t tag : 5; /* 0x2 */ + arminstr_t cond : 4; +} ARMInstrSwap; + +#define ARM_SWP_ID 2 +#define ARM_SWP_ID2 9 +#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4)) +#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4)) + + + +/* Software interrupt */ +typedef struct { + arminstr_t num : 24; + arminstr_t tag : 4; + arminstr_t cond : 4; +} ARMInstrSWI; + +#define ARM_SWI_ID 0xF +#define ARM_SWI_MASK (0xF << 24) +#define ARM_SWI_TAG (ARM_SWI_ID << 24) + + + +/* Co-processor Data Processing */ +typedef struct { + arminstr_t crm : 4; + arminstr_t tag2 : 1; /* 0 */ + arminstr_t op2 : 3; + arminstr_t cpn : 4; /* CP number */ + arminstr_t crd : 4; + arminstr_t crn : 4; + arminstr_t op : 4; + arminstr_t tag : 4; /* 0xE */ + arminstr_t cond : 4; +} ARMInstrCDP; + +#define ARM_CDP_ID 0xE +#define ARM_CDP_ID2 0 +#define ARM_CDP_MASK ((0xF << 24) | (1 << 4)) +#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4)) + + +/* Co-processor Data Transfer (ldc/stc) */ +typedef struct { + arminstr_t offs : 8; + arminstr_t cpn : 4; + arminstr_t crd : 4; + arminstr_t rn : 4; + arminstr_t ls : 1; + arminstr_t wb : 1; + arminstr_t n : 1; + arminstr_t u : 1; + arminstr_t p : 1; + arminstr_t tag : 3; + arminstr_t cond : 4; +} ARMInstrCDT; + +#define ARM_CDT_ID 6 +#define ARM_CDT_MASK (7 << 25) +#define ARM_CDT_TAG (ARM_CDT_ID << 25) + + +/* Co-processor Register Transfer (mcr/mrc) */ +typedef struct 
{ + arminstr_t crm : 4; + arminstr_t tag2 : 1; + arminstr_t op2 : 3; + arminstr_t cpn : 4; + arminstr_t rd : 4; + arminstr_t crn : 4; + arminstr_t ls : 1; + arminstr_t op1 : 3; + arminstr_t tag : 4; + arminstr_t cond : 4; +} ARMInstrCRT; + +#define ARM_CRT_ID 0xE +#define ARM_CRT_ID2 0x1 +#define ARM_CRT_MASK ((0xF << 24) | (1 << 4)) +#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4)) + +/* Move register to PSR. */ +typedef union { + ARMDPI_op2_imm op2_imm; + struct { + arminstr_t rm : 4; + arminstr_t pad : 8; /* 0 */ + arminstr_t tag4 : 4; /* 0xF */ + arminstr_t fld : 4; + arminstr_t tag3 : 2; /* 0x2 */ + arminstr_t sel : 1; + arminstr_t tag2 : 2; /* 0x2 */ + arminstr_t type : 1; + arminstr_t tag : 2; /* 0 */ + arminstr_t cond : 4; + } all; +} ARMInstrMSR; + +#define ARM_MSR_ID 0 +#define ARM_MSR_ID2 2 +#define ARM_MSR_ID3 2 +#define ARM_MSR_ID4 0xF +#define ARM_MSR_MASK ((3 << 26) | \ + (3 << 23) | \ + (3 << 20) | \ + (0xF << 12)) +#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \ + (ARM_MSR_ID2 << 23) | \ + (ARM_MSR_ID3 << 20) | \ + (ARM_MSR_ID4 << 12)) + + +/* Move PSR to register. */ +typedef struct { + arminstr_t tag3 : 12; + arminstr_t rd : 4; + arminstr_t tag2 : 6; + arminstr_t sel : 1; /* CPSR | SPSR */ + arminstr_t tag : 5; + arminstr_t cond : 4; +} ARMInstrMRS; + +#define ARM_MRS_ID 2 +#define ARM_MRS_ID2 0xF +#define ARM_MRS_ID3 0 +#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF) +#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3) + + + +typedef union { + ARMInstrBR br; + ARMInstrDPI dpi; + ARMInstrMRT mrt; + ARMInstrMul mul; + ARMInstrWXfer wxfer; + ARMInstrHXfer hxfer; + ARMInstrSwap swp; + ARMInstrCDP cdp; + ARMInstrCDT cdt; + ARMInstrCRT crt; + ARMInstrSWI swi; + ARMInstrMSR msr; + ARMInstrMRS mrs; + + ARMInstrGeneric generic; + arminstr_t raw; +} ARMInstr; + + +#include "arm_dpimacros.h" + +#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0) + + +#ifdef __cplusplus +} +#endif + +#endif /* ARM_H */ + + + diff --git a/arm/arm-dis.c b/arm/arm-dis.c new file mode 100644 index 0000000..caec7e5 --- /dev/null +++ b/arm/arm-dis.c @@ -0,0 +1,494 @@ +/* + * Copyright (c) 2002 Sergey Chaban + */ + + +#include + +#include "arm-dis.h" +#include "arm-codegen.h" + + +static ARMDis* gdisasm = NULL; + +static int use_reg_alias = 1; + +const static char* cond[] = { + "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "", "nv" +}; + +const static char* ops[] = { + "and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc", + "tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn" +}; + +const static char* shift_types[] = {"lsl", "lsr", "asr", "ror"}; + +const static char* mul_ops[] = { + "mul", "mla", "?", "?", "umul", "umlal", "smull", "smlal" +}; + +const static char* reg_alias[] = { + "a1", "a2", "a3", "a4", + "r4", "r5", "r6", "r7", "r8", "r9", "r10", + "fp", "ip", "sp", "lr", "pc" +}; + +const static char* msr_fld[] = {"f", "c", "x", "?", "s"}; + + +/* private functions prototypes (to keep compiler happy) */ +void chk_out(ARMDis* dis); +void dump_reg(ARMDis* dis, int reg); +void dump_creg(ARMDis* dis, int creg); +void dump_reglist(ARMDis* dis, int reg_list); +void init_gdisasm(void); + +void dump_br(ARMDis* dis, ARMInstr i); +void dump_cdp(ARMDis* dis, ARMInstr i); +void dump_cdt(ARMDis* dis, ARMInstr i); +void dump_crt(ARMDis* dis, ARMInstr i); +void dump_dpi(ARMDis* dis, ARMInstr i); +void dump_hxfer(ARMDis* dis, ARMInstr i); +void dump_mrs(ARMDis* dis, ARMInstr i); +void dump_mrt(ARMDis* dis, ARMInstr i); 
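The ARMInstr union just defined is what drives the dump_* handlers declared here: armdis_decode() below classifies a raw word by masking its fixed bits and comparing against each format's tag. A minimal sketch of that idiom follows; classify_arm_word is an illustrative name, not part of arm-dis.

	/* Sketch of the mask/tag classification used by armdis_decode();
	 * assumes arm-codegen.h is included. Order matters: formats with
	 * more fixed bits must be tested before looser ones. */
	static const char*
	classify_arm_word (arminstr_t raw)
	{
		if ((raw & ARM_BR_MASK) == ARM_BR_TAG) return "branch";
		if ((raw & ARM_SWP_MASK) == ARM_SWP_TAG) return "swap";
		if ((raw & ARM_MUL_MASK) == ARM_MUL_TAG) return "multiply";
		if ((raw & ARM_DPI_MASK) == ARM_DPI_TAG) return "data processing";
		return "other";
	}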
+void dump_msr(ARMDis* dis, ARMInstr i); +void dump_mul(ARMDis* dis, ARMInstr i); +void dump_swi(ARMDis* dis, ARMInstr i); +void dump_swp(ARMDis* dis, ARMInstr i); +void dump_wxfer(ARMDis* dis, ARMInstr i); + + +/* +void out(ARMDis* dis, const char* format, ...) { + va_list arglist; + va_start(arglist, format); + fprintf(dis->dis_out, format, arglist); + va_end(arglist); +} +*/ + + +void chk_out(ARMDis* dis) { + if (dis != NULL && dis->dis_out == NULL) dis->dis_out = stdout; +} + + +void armdis_set_output(ARMDis* dis, FILE* f) { + if (dis != NULL) { + dis->dis_out = f; + chk_out(dis); + } +} + +FILE* armdis_get_output(ARMDis* dis) { + return (dis != NULL ? dis->dis_out : NULL); +} + + + + +void dump_reg(ARMDis* dis, int reg) { + reg &= 0xF; + if (!use_reg_alias || (reg > 3 && reg < 11)) { + fprintf(dis->dis_out, "r%d", reg); + } else { + fprintf(dis->dis_out, reg_alias[reg]); + } +} + +void dump_creg(ARMDis* dis, int creg) { + if (dis != NULL) { + creg &= 0xF; + fprintf(dis->dis_out, "c%d", creg); + } +} + +void dump_reglist(ARMDis* dis, int reg_list) { + int i = 0, j, n = 0; + int m1 = 1, m2, rn; + while (i < 16) { + if ((reg_list & m1) != 0) { + if (n != 0) fprintf(dis->dis_out, ", "); + n++; + dump_reg(dis, i); + for (j = i+1, rn = 0, m2 = m1<<1; j < 16; ++j, m2<<=1) { + if ((reg_list & m2) != 0) ++rn; + else break; + } + i+=rn; + if (rn > 1) { + fprintf(dis->dis_out, "-"); + dump_reg(dis, i); + } else if (rn == 1) { + fprintf(dis->dis_out, ", "); + dump_reg(dis, i); + } + m1<<=(rn+1); + i++; + } else { + ++i; + m1<<=1; + } + } +} + + +void dump_br(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "b%s%s\t%x", + (i.br.link == 1) ? "l" : "", + cond[i.br.cond], i.br.offset); +} + + +void dump_dpi(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]); + + if ((i.dpi.all.opcode >= ARMOP_TST) && (i.dpi.all.opcode <= ARMOP_CMN) && (i.dpi.all.s != 0)) { + fprintf(dis->dis_out, "s"); + } + + fprintf(dis->dis_out, "\t"); + + if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) { + /* comparison operation */ + dump_reg(dis, i.dpi.all.rd); + fprintf(dis->dis_out, ", "); + } + + if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) { + dump_reg(dis, i.dpi.all.rn); + fprintf(dis->dis_out, ", "); + } + + if (i.dpi.all.type == 1) { + /* immediate */ + if (i.dpi.op2_imm.rot != 0) { + fprintf(dis->dis_out, "#%d, %d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.rot << 1, + ARM_SCALE(i.dpi.op2_imm.imm, (i.dpi.op2_imm.rot << 1)) ); + } else { + fprintf(dis->dis_out, "#%d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.imm); + } + } else { + /* reg-reg */ + if (i.dpi.op2_reg.tag == 0) { + /* op2 is reg shift by imm */ + dump_reg(dis, i.dpi.op2_reg_imm.r2.rm); + if (i.dpi.op2_reg_imm.imm.shift != 0) { + fprintf(dis->dis_out, " %s #%d", shift_types[i.dpi.op2_reg_imm.r2.type], i.dpi.op2_reg_imm.imm.shift); + } + } else { + /* op2 is reg shift by reg */ + dump_reg(dis, i.dpi.op2_reg_reg.r2.rm); + fprintf(dis->dis_out, " %s ", shift_types[i.dpi.op2_reg_reg.r2.type]); + dump_reg(dis, i.dpi.op2_reg_reg.reg.rs); + } + + } +} + +void dump_wxfer(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s%s\t", + (i.wxfer.all.ls == 0) ? "str" : "ldr", + cond[i.generic.cond], + (i.wxfer.all.b == 0) ? "" : "b"); + dump_reg(dis, i.wxfer.all.rd); + fprintf(dis->dis_out, ", ["); + dump_reg(dis, i.wxfer.all.rn); + fprintf(dis->dis_out, "%s, ", (i.wxfer.all.p == 0) ? 
"]" : ""); + + if (i.wxfer.all.type == 0) { /* imm */ + fprintf(dis->dis_out, "#%s%d", (i.wxfer.all.u == 0) ? "-" : "", i.wxfer.all.op2_imm); + } else { + dump_reg(dis, i.wxfer.op2_reg_imm.r2.rm); + if (i.wxfer.op2_reg_imm.imm.shift != 0) { + fprintf(dis->dis_out, " %s #%d", shift_types[i.wxfer.op2_reg_imm.r2.type], i.wxfer.op2_reg_imm.imm.shift); + } + } + + if (i.wxfer.all.p != 0) { + /* close pre-index instr, also check for write-back */ + fprintf(dis->dis_out, "]%s", (i.wxfer.all.wb != 0) ? "!" : ""); + } +} + +void dump_hxfer(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s%s%s\t", + (i.hxfer.ls == 0) ? "str" : "ldr", + cond[i.generic.cond], + (i.hxfer.s != 0) ? "s" : "", + (i.hxfer.h != 0) ? "h" : "b"); + dump_reg(dis, i.hxfer.rd); + fprintf(dis->dis_out, ", ["); + dump_reg(dis, i.hxfer.rn); + fprintf(dis->dis_out, "%s, ", (i.hxfer.p == 0) ? "]" : ""); + + if (i.hxfer.type != 0) { /* imm */ + fprintf(dis->dis_out, "#%s%d", (i.hxfer.u == 0) ? "-" : "", (i.hxfer.imm_hi << 4) | i.hxfer.rm); + } else { + dump_reg(dis, i.hxfer.rm); + } + + if (i.hxfer.p != 0) { + /* close pre-index instr, also check for write-back */ + fprintf(dis->dis_out, "]%s", (i.hxfer.wb != 0) ? "!" : ""); + } +} + + +void dump_mrt(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s%s%s\t", (i.mrt.ls == 0) ? "stm" : "ldm", cond[i.mrt.cond], + (i.mrt.u == 0) ? "d" : "i", (i.mrt.p == 0) ? "a" : "b"); + dump_reg(dis, i.mrt.rn); + fprintf(dis->dis_out, "%s, {", (i.mrt.wb != 0) ? "!" : ""); + dump_reglist(dis, i.mrt.reg_list); + fprintf(dis->dis_out, "}"); +} + + +void dump_swp(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "swp%s%s ", cond[i.swp.cond], (i.swp.b != 0) ? "b" : ""); + dump_reg(dis, i.swp.rd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.swp.rm); + fprintf(dis->dis_out, ", ["); + dump_reg(dis, i.swp.rn); + fprintf(dis->dis_out, "]"); +} + + +void dump_mul(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s%s\t", mul_ops[i.mul.opcode], cond[i.mul.cond], (i.mul.s != 0) ? "s" : ""); + switch (i.mul.opcode) { + case ARMOP_MUL: + dump_reg(dis, i.mul.rd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rm); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rs); + break; + case ARMOP_MLA: + dump_reg(dis, i.mul.rd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rm); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rs); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rn); + break; + case ARMOP_UMUL: + case ARMOP_UMLAL: + case ARMOP_SMULL: + case ARMOP_SMLAL: + dump_reg(dis, i.mul.rd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rn); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rm); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.mul.rs); + break; + default: + fprintf(dis->dis_out, "DCD 0x%x\t; ", i.raw); + break; + } +} + + +void dump_cdp(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "cdp%s\tp%d, %d, ", cond[i.generic.cond], i.cdp.cpn, i.cdp.op); + dump_creg(dis, i.cdp.crd); + fprintf(dis->dis_out, ", "); + dump_creg(dis, i.cdp.crn); + fprintf(dis->dis_out, ", "); + dump_creg(dis, i.cdp.crm); + + if (i.cdp.op2 != 0) { + fprintf(dis->dis_out, ", %d", i.cdp.op2); + } +} + + +void dump_cdt(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s%s\tp%d, ", (i.cdt.ls == 0) ? "stc" : "ldc", + cond[i.generic.cond], (i.cdt.n != 0) ? 
"l" : "", i.cdt.cpn); + dump_creg(dis, i.cdt.crd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.cdt.rn); + + if (i.cdt.p == 0) { + fprintf(dis->dis_out, "]"); + } + + if (i.cdt.offs != 0) { + fprintf(dis->dis_out, ", #%d", i.cdt.offs); + } + + if (i.cdt.p != 0) { + fprintf(dis->dis_out, "]%s", (i.cdt.wb != 0) ? "!" : ""); + } +} + + +void dump_crt(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "%s%s\tp%d, %d, ", (i.crt.ls == 0) ? "mrc" : "mcr", + cond[i.generic.cond], i.crt.cpn, i.crt.op1); + dump_reg(dis, i.crt.rd); + fprintf(dis->dis_out, ", "); + dump_creg(dis, i.crt.crn); + fprintf(dis->dis_out, ", "); + dump_creg(dis, i.crt.crm); + + if (i.crt.op2 != 0) { + fprintf(dis->dis_out, ", %d", i.crt.op2); + } +} + + +void dump_msr(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "msr%s\t%spsr_, ", cond[i.generic.cond], + (i.msr.all.sel == 0) ? "s" : "c"); + if (i.msr.all.type == 0) { + /* reg */ + fprintf(dis->dis_out, "%s, ", msr_fld[i.msr.all.fld]); + dump_reg(dis, i.msr.all.rm); + } else { + /* imm */ + fprintf(dis->dis_out, "f, #%d", i.msr.op2_imm.imm << i.msr.op2_imm.rot); + } +} + + +void dump_mrs(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "mrs%s\t", cond[i.generic.cond]); + dump_reg(dis, i.mrs.rd); + fprintf(dis->dis_out, ", %spsr", (i.mrs.sel == 0) ? "s" : "c"); +} + + +void dump_swi(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "swi%s\t%d", cond[i.generic.cond], i.swi.num); +} + + + +void armdis_decode(ARMDis* dis, void* p, int size) { + int i; + arminstr_t* pi = (arminstr_t*)p; + ARMInstr instr; + + if (dis == NULL) return; + + chk_out(dis); + + size/=sizeof(arminstr_t); + + for (i=0; idis_out, "%p:\t%08x\t", pi, *pi); + instr.raw = *pi++; + + if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) { + dump_br(dis, instr); + } else if ((instr.raw & ARM_SWP_MASK) == ARM_SWP_TAG) { + dump_swp(dis, instr); + } else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) { + dump_mul(dis, instr); + } else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) { + dump_wxfer(dis, instr); + } else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) { + dump_hxfer(dis, instr); + } else if ((instr.raw & ARM_DPI_MASK) == ARM_DPI_TAG) { + dump_dpi(dis, instr); + } else if ((instr.raw & ARM_MRT_MASK) == ARM_MRT_TAG) { + dump_mrt(dis, instr); + } else if ((instr.raw & ARM_CDP_MASK) == ARM_CDP_TAG) { + dump_cdp(dis, instr); + } else if ((instr.raw & ARM_CDT_MASK) == ARM_CDT_TAG) { + dump_cdt(dis, instr); + } else if ((instr.raw & ARM_CRT_MASK) == ARM_CRT_TAG) { + dump_crt(dis, instr); + } else if ((instr.raw & ARM_MSR_MASK) == ARM_MSR_TAG) { + dump_msr(dis, instr); + } else if ((instr.raw & ARM_MRS_MASK) == ARM_MRS_TAG) { + dump_mrs(dis, instr); + } else if ((instr.raw & ARM_SWI_MASK) == ARM_SWI_TAG) { + dump_swi(dis, instr); + } else { + fprintf(dis->dis_out, "DCD 0x%x\t; ", instr.raw); + } + + fprintf(dis->dis_out, "\n"); + } +} + + +void armdis_open(ARMDis* dis, const char* dump_name) { + if (dis != NULL && dump_name != NULL) { + armdis_set_output(dis, fopen(dump_name, "w")); + } +} + + +void armdis_close(ARMDis* dis) { + if (dis->dis_out != NULL && dis->dis_out != stdout && dis->dis_out != stderr) { + fclose(dis->dis_out); + dis->dis_out = NULL; + } +} + + +void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size) { + armdis_open(dis, dump_name); + armdis_decode(dis, p, size); + armdis_close(dis); +} + + +void armdis_init(ARMDis* dis) { + if (dis != NULL) { + /* set to stdout */ + armdis_set_output(dis, NULL); + } +} + + + + +void init_gdisasm() { + if (gdisasm == NULL) { + 
gdisasm = (ARMDis*)malloc(sizeof(ARMDis)); + armdis_init(gdisasm); + } +} + +void _armdis_set_output(FILE* f) { + init_gdisasm(); + armdis_set_output(gdisasm, f); +} + +FILE* _armdis_get_output() { + init_gdisasm(); + return armdis_get_output(gdisasm); +} + +void _armdis_decode(void* p, int size) { + init_gdisasm(); + armdis_decode(gdisasm, p, size); +} + +void _armdis_open(const char* dump_name) { + init_gdisasm(); + armdis_open(gdisasm, dump_name); +} + +void _armdis_close() { + init_gdisasm(); + armdis_close(gdisasm); +} + +void _armdis_dump(const char* dump_name, void* p, int size) { + init_gdisasm(); + armdis_dump(gdisasm, dump_name, p, size); +} + diff --git a/arm/arm-dis.h b/arm/arm-dis.h new file mode 100644 index 0000000..b93db30 --- /dev/null +++ b/arm/arm-dis.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2002 Sergey Chaban + */ + +#ifndef ARM_DIS +#define ARM_DIS + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _ARMDis { + FILE* dis_out; +} ARMDis; + + +void _armdis_set_output(FILE* f); +FILE* _armdis_get_output(void); +void _armdis_decode(void* p, int size); +void _armdis_open(const char* dump_name); +void _armdis_close(void); +void _armdis_dump(const char* dump_name, void* p, int size); + + +void armdis_init(ARMDis* dis); +void armdis_set_output(ARMDis* dis, FILE* f); +FILE* armdis_get_output(ARMDis* dis); +void armdis_decode(ARMDis* dis, void* p, int size); +void armdis_open(ARMDis* dis, const char* dump_name); +void armdis_close(ARMDis* dis); +void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size); + +#ifdef __cplusplus +} +#endif + +#endif /* ARM_DIS */ diff --git a/arm/cmp_macros.th b/arm/cmp_macros.th new file mode 100644 index 0000000..8a35708 --- /dev/null +++ b/arm/cmp_macros.th @@ -0,0 +1,11 @@ +/* PSR = rd, (imm8 ROR 2*rot) */ +#define ARM_<Op>_REG_IMM_COND(p, rd, imm8, rot, cond) \ + ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, 0, imm8, rot, cond) +#define ARM_<Op>_REG_IMM(p, rd, imm8, rot) \ + ARM_<Op>_REG_IMM_COND(p, rd, imm8, rot, ARMCOND_AL) +/* PSR = rd, imm8 */ +#define ARM_<Op>_REG_IMM8_COND(p, rd, imm8, cond) \ + ARM_<Op>_REG_IMM_COND(p, rd, imm8, 0, cond) +#define ARM_<Op>_REG_IMM8(p, rd, imm8) \ + ARM_<Op>_REG_IMM8_COND(p, rd, imm8, ARMCOND_AL) + diff --git a/arm/dpi_macros.th b/arm/dpi_macros.th new file mode 100644 index 0000000..f8ec608 --- /dev/null +++ b/arm/dpi_macros.th @@ -0,0 +1,44 @@ +/* -- <Op> -- */ + +/* rd = rn <Op> (imm8 ROR rot) ; rot is power of 2 */ +#define ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \ + ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond) +#define ARM_<Op>_REG_IMM(p, rd, rn, imm8, rot) \ + ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL) +#define ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \ + ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond) +#define ARM_<Op>S_REG_IMM(p, rd, rn, imm8, rot) \ + ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL) + +/* rd = rn <Op> imm8 */ +#define ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, cond) \ + ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, 0, cond) +#define ARM_<Op>_REG_IMM8(p, rd, rn, imm8) \ + ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL) +#define ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, cond) \ + ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, 0, cond) +#define ARM_<Op>S_REG_IMM8(p, rd, rn, imm8) \ + ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL) + + +/* rd = rn <Op> rm */ +#define ARM_<Op>_REG_REG_COND(p, rd, rn, rm, cond) \ + ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond) +#define ARM_<Op>_REG_REG(p, rd, rn, rm) \ + ARM_<Op>_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL) +#define ARM_<Op>S_REG_REG_COND(p, rd,
rn, rm, cond) \ + ARM_DPIOP_S_REG_REG_COND(p, ARMOP_, rd, rn, rm, cond) +#define ARM_S_REG_REG(p, rd, rn, rm) \ + ARM_S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL) + +/* rd = rn (rm imm_shift) */ +#define ARM__REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \ + ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_, rd, rn, rm, shift_type, imm_shift, cond) +#define ARM__REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \ + ARM__REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL) +#define ARM_S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \ + ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_, rd, rn, rm, shift_type, imm_shift, cond) +#define ARM_S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \ + ARM_S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL) + + diff --git a/arm/dpiops.sh b/arm/dpiops.sh new file mode 100755 index 0000000..2eb43d9 --- /dev/null +++ b/arm/dpiops.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC" +CMP_OPCODES="TST TEQ CMP CMN" +MOV_OPCODES="MOV MVN" +OUTFILE=arm_dpimacros.h + +# $1: opcode list +# $2: template +function gen() { + for i in $1; do + sed "s//$i/g" $2.th >> $OUTFILE + done +} + + + +echo -e "/* Macros for DPI ops, auto-generated from template */\n" > $OUTFILE + +echo -e "\n/* mov/mvn */\n" >> $OUTFILE +gen "$MOV_OPCODES" mov_macros + +echo -e "\n/* DPIs, arithmetic and logical */\n" >> $OUTFILE +gen "$OPCODES" dpi_macros + +echo -e "\n\n" >> $OUTFILE + +echo -e "\n/* DPIs, comparison */\n" >> $OUTFILE +gen "$CMP_OPCODES" cmp_macros + +echo -e "/* end generated */\n\n" >> $OUTFILE + diff --git a/arm/mov_macros.th b/arm/mov_macros.th new file mode 100644 index 0000000..151a29b --- /dev/null +++ b/arm/mov_macros.th @@ -0,0 +1,39 @@ +/* rd = imm8 ROR rot */ +#define ARM__REG_IMM_COND(p, reg, imm8, rot, cond) \ + ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_, reg, 0, imm8, rot, cond) +#define ARM__REG_IMM(p, reg, imm8, rot) \ + ARM__REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL) +#define ARM__REG_IMM8(p, reg, imm8) \ + ARM__REG_IMM(p, reg, imm8, 0) +/* S */ +#define ARM_S_REG_IMM_COND(p, reg, imm8, rot, cond) \ + ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_, reg, 0, imm8, rot, cond) +#define ARM_S_REG_IMM(p, reg, imm8, rot) \ + ARM_S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL) + + +/* rd = rm */ +#define ARM__REG_REG_COND(p, rd, rm, cond) \ + ARM_DPIOP_REG_REG_COND(p, ARMOP_, rd, 0, rm, cond) +#define ARM__REG_REG(p, rd, rm) \ + ARM__REG_REG_COND(p, rd, rm, ARMCOND_AL) +/* S */ +#define ARM_S_REG_REG_COND(p, rd, rm, cond) \ + ARM_DPIOP_S_REG_REG_COND(p, ARMOP_, rd, 0, rm, cond) +#define ARM_S_REG_REG(p, rd, rm) \ + ARM_S_REG_REG_COND(p, rd, rm, ARMCOND_AL) + + + +/* rd = rm imm_shift */ +#define ARM__REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \ + ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_, rd, 0, rm, shift_type, imm_shift, cond) +#define ARM__REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \ + ARM__REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL) +/* S */ +#define ARM_S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \ + ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_, rd, 0, rm, shift_type, imm_shift, cond) +#define ARM_S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \ + ARM_S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL) + + diff --git a/arm/tramp.c b/arm/tramp.c new file mode 100644 index 0000000..0a7c47d --- /dev/null +++ b/arm/tramp.c @@ -0,0 +1,809 @@ +/* + * Create trampolines to invoke arbitrary functions. 
+ * Copyright (c) 2002 Sergey Chaban
+ */
+
+#include "arm-codegen.h"
+#include "arm-dis.h"
+
+#if defined(_WIN32_WCE) || defined (UNDER_CE)
+#	include <windows.h>
+#endif
+
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+
+
+#if 1
+#	define ARM_DUMP_DISASM 1
+#endif
+
+
+/* prototypes for private functions (to avoid compiler warnings) */
+MonoString* mono_string_new_wrapper (const char* text);
+void flush_icache (void);
+void* alloc_code_buff (int num_instr);
+
+
+
+/*
+ * The resulting function takes the form:
+ * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
+ * NOTE: all args passed in ARM registers (A1-A4),
+ *       then copied to R4-R7 (see definitions below).
+ */
+
+#define REG_FUNC_ADDR ARMREG_R4
+#define REG_RETVAL    ARMREG_R5
+#define REG_THIS      ARMREG_R6
+#define REG_ARGP      ARMREG_R7
+
+
+#define ARG_SIZE sizeof(stackval)
+
+
+
+/*
+ * Invokes mono_string_new for the current AppDomain.
+ */
+MonoString* mono_string_new_wrapper (const char* text)
+{
+	return text == NULL ? NULL : mono_string_new(mono_domain_get(), text);
+}
+
+
+void flush_icache ()
+{
+#if defined(_WIN32)
+	FlushInstructionCache(GetCurrentProcess(), NULL, 0);
+#else
+#	if 0
+	asm ("mov r0, r0");
+	asm ("mov r0, #0");
+	asm ("mcr p15, 0, r0, c7, c7, 0");
+#	else
+	/* TODO: use (movnv pc, rx) method */
+#	endif
+#endif
+}
+
+
+void* alloc_code_buff (int num_instr)
+{
+	void* code_buff;
+
+#if defined(_WIN32) || defined(UNDER_CE)
+	int old_prot = 0;
+#endif
+
+	code_buff = malloc(num_instr * sizeof(arminstr_t));
+
+#if defined(_WIN32) || defined(UNDER_CE)
+	VirtualProtect(code_buff, num_instr * sizeof(arminstr_t), PAGE_EXECUTE_READWRITE, &old_prot);
+#endif
+
+	return code_buff;
+}
+
+
+/*
+ * Refer to ARM Procedure Call Standard (APCS) for more info.
+ */
+MonoPIFunc mono_create_trampoline (MonoMethod* method, int runtime)
+{
+	MonoMethodSignature* sig;
+	MonoType* param;
+	MonoPIFunc code_buff;
+	arminstr_t* p, * utf8_addr, * free_addr, * str_new_addr;
+	guint32 code_size, stack_size;
+	guint32 simple_type;
+	int i, hasthis, aregs, regc, stack_offs;
+	int utf8_offs, utf8_reg, utf8_stack_offs;
+	int this_loaded;
+	int str_args, strc;
+	guchar reg_alloc [ARM_NUM_ARG_REGS];
+
+	/* pessimistic estimation for prologue/epilogue size */
+	code_size = 16 + 16;
+	/* push/pop work regs */
+	code_size += 2;
+	/* call */
+	code_size += 2;
+	/* handle retval */
+	code_size += 2;
+
+	stack_size = 0;
+	str_args = 0;
+	sig = method->signature;
+	hasthis = sig->hasthis ?
1 : 0; + + aregs = ARM_NUM_ARG_REGS - hasthis; + + for (i = 0, regc = aregs; i < sig->param_count; ++i) { + param = sig->params [i]; + + /* keep track of argument sizes */ + if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0; + + if (param->byref) { + if (regc > 0) { + code_size += 1; + reg_alloc [i] = regc; + --regc; + } else { + code_size += 2; + stack_size += sizeof(gpointer); + } + } else { + simple_type = param->type; +enum_calc_size: + switch (simple_type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_R4: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + if (regc > 0) { + /* register arg */ + code_size += 1; + reg_alloc [i] = regc; + --regc; + } else { + /* stack arg */ + code_size += 2; + stack_size += 4; + } + + if (simple_type == MONO_TYPE_STRING + && !(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) + && !runtime) { + code_size += 6; /* rough */ + ++str_args; + } + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + /* keep track of argument sizes */ + if (regc > 1) { + /* fits into registers, two LDRs */ + code_size += 2; + reg_alloc [i] = regc; + regc -= 2; + } else if (regc > 0) { + /* first half fits into register, one LDR */ + code_size += 1; + reg_alloc [i] = regc; + --regc; + /* the rest on the stack, LDR/STR */ + code_size += 2; + stack_size += 4; + } else { + /* stack arg, 4 instrs - 2x(LDR/STR) */ + code_size += 4; + stack_size += 2 * 4; + } + break; + case MONO_TYPE_VALUETYPE: + if (param->data.klass->enumtype) { + simple_type = param->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (mono_class_value_size(param->data.klass, NULL) != 4) { + g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL)); + } + if (regc > 0) { + /* register arg */ + code_size += 1; + reg_alloc [i] = regc; + --regc; + } else { + /* stack arg */ + code_size += 2; + stack_size += 4; + } + break; + default : + break; + } + } + } + + if (str_args) code_size += 2; + + code_buff = (MonoPIFunc)alloc_code_buff(code_size); + p = (arminstr_t*)code_buff; + + /* prologue */ + p = arm_emit_lean_prologue((arminstr_t*)p, + stack_size + str_args*sizeof(gpointer), + /* save workset (r4-r7) */ + (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7)); + + + /* copy args into workset */ + /* callme - always present */ + ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1); + /* retval */ + if (sig->ret->byref || (sig->ret->type != MONO_TYPE_VOID)) { + ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2); + } + /* this_obj */ + if (sig->hasthis) { + this_loaded = 0; + if (stack_size == 0) { + ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3); + this_loaded = 1; + } else { + ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3); + } + } + /* args */ + if (sig->param_count != 0) { + ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4); + } + + if (str_args || sig->ret->type == MONO_TYPE_STRING) { + /* branch around address table */ + ARM_B(p, str_args ? 2 : 0); + + /* create branch table for string functions */ + if (str_args) { + /* allocate slots for convert + * and free functions only if + * we have some string args, + * otherwise only string_new + * is needed for retval. 
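+			 * (A note on the mechanism: this is the usual ARM inline
+			 * literal-pool idiom -- the function addresses are emitted
+			 * as data words inside the instruction stream, skipped by
+			 * the ARM_B above, and later fetched with PC-relative loads
+			 * such as ARM_LDR_IMM(p, reg, ARMREG_PC, offs), where the
+			 * negative offs accounts for the 8-byte PC prefetch.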
+ */ + utf8_addr = p; + *p++ = (arminstr_t)&mono_string_to_utf8; + free_addr = p; + *p++ = (arminstr_t)&g_free; + } + str_new_addr = p; + *p++ = (arminstr_t)&mono_string_new_wrapper; + + strc = str_args; /* # of string args */ + } + + stack_offs = stack_size; + utf8_stack_offs = stack_size + str_args*sizeof(gpointer); + + /* handle arguments */ + /* in reverse order so we could use r0 (arg1) for memory transfers */ + for (i = sig->param_count; --i >= 0;) { + param = sig->params [i]; + if (param->byref) { + if (i < aregs) { + ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE); + } else { + stack_offs -= sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R4, REG_ARGP, i*ARG_SIZE); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, stack_offs); + } + } else { + simple_type = param->type; +enum_marshal: + switch (simple_type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_R4: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: +push_a_word: + if (i < aregs && reg_alloc [i] > 0) { + /* pass in register */ + ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE); + } else { + stack_offs -= sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + } + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + if (i < aregs && reg_alloc [i] > 0) { + if (reg_alloc [i] > 1) { + /* pass in registers */ + ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE); + ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4); + } else { + stack_offs -= sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE); + } + } else { + /* two words transferred on the stack */ + stack_offs -= 2*sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4); + } + break; + case MONO_TYPE_STRING: + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + goto push_a_word; + } else { + if (sig->hasthis && this_loaded) { + ARM_MOV_REG_REG(p, REG_THIS, ARMREG_A1); + this_loaded = 0; + } + + if (sig->hasthis && strc == str_args) { + ARM_PUSH(p, (1 << REG_THIS)); + /* adjust stack pointers */ + stack_offs += sizeof(armword_t); + utf8_stack_offs += sizeof(armword_t); + } + + utf8_offs = -(p + 2 - utf8_addr) * sizeof(arminstr_t); + utf8_reg = sig->hasthis ? 
REG_FUNC_ADDR : REG_THIS; + /* load function address */ + ARM_LDR_IMM(p, utf8_reg, ARMREG_PC, utf8_offs); + /* load MonoString ptr */ + ARM_LDR_IMM(p, ARMREG_A1, REG_ARGP, i*ARG_SIZE); + /* call string_to_utf8 function */ + ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); + ARM_MOV_REG_REG(p, ARMREG_PC, utf8_reg); + + /* count-down string args */ + --strc; + + if (sig->hasthis && strc == 0) { + ARM_POP(p, (1 << REG_THIS)); + /* restore stack pointers */ + stack_offs -= sizeof(armword_t); + utf8_stack_offs -= sizeof(armword_t); + } + + /* maintain list of allocated strings */ + utf8_stack_offs -= sizeof(gpointer); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, utf8_stack_offs); + + if (i < aregs && reg_alloc [i] > 0) { + /* pass in register */ + utf8_reg = ARMREG_A1 + hasthis + (aregs - reg_alloc [i]); + /* result returned in R0, avoid NOPs */ + if (utf8_reg != ARMREG_R0) { + ARM_MOV_REG_REG(p, utf8_reg, ARMREG_R0); + } + } else { + stack_offs -= sizeof(armword_t); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + } + } + break; + case MONO_TYPE_VALUETYPE: + if (param->data.klass->enumtype) { + /* it's an enum value, proceed based on its base type */ + simple_type = param->data.klass->enum_basetype->type; + goto enum_marshal; + } else { + goto push_a_word; + } + break; + + default: + break; + } + } + } + + if (sig->hasthis && !this_loaded) { + /* [this] always passed in A1, regardless of sig->call_convention */ + ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS); + } + + /* call [func] */ + ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); + ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR); + + + /* handle retval */ + if (sig->ret->byref) { + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); + } else { + simple_type = sig->ret->type; +enum_retvalue: + switch (simple_type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0); + break; + /* + * A 32-bit integer and integer-equivalent return value + * is returned in R0. + * Single-precision floating-point values are returned in R0. + */ + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_R4: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_ARRAY: + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); + break; + case MONO_TYPE_STRING: + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + /* return UTF8 string as-is */ + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); + } else { + /* if result is non-null convert it back to MonoString */ + utf8_offs = -(p + 2 - str_new_addr) * sizeof(arminstr_t); + ARM_TEQ_REG_IMM8(p, ARMREG_R0, 0); + /* load mono_string_new_wrapper address */ + ARM_LDR_IMM_COND(p, ARMREG_R2, ARMREG_PC, utf8_offs, ARMCOND_NE); + /* call mono_string_new_wrapper */ + ARM_MOV_REG_REG_COND(p, ARMREG_LR, ARMREG_PC, ARMCOND_NE); + ARM_MOV_REG_REG_COND(p, ARMREG_PC, ARMREG_R2, ARMCOND_NE); + + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); + } + break; + /* + * A 64-bit integer is returned in R0 and R1. + * Double-precision floating-point values are returned in R0 and R1. 
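+		 * (Both cases follow the APCS software floating-point model:
+		 * with no FPU register file assumed, a double comes back as
+		 * its raw 64-bit pattern split across R0 and R1.)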
+ */ + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); + ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simple_type = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + break; + case MONO_TYPE_VOID: + break; + default: + break; + } + } + + /* free allocated strings */ + if (str_args) { + utf8_stack_offs = stack_size + str_args*sizeof(gpointer); + for (strc = str_args; --strc >= 0;) { + utf8_stack_offs -= sizeof(gpointer); + /* calc PC-relative offset to function addr */ + utf8_offs = -(p + 2 - free_addr) * sizeof(arminstr_t); + /* load function address */ + ARM_LDR_IMM(p, ARMREG_R2, ARMREG_PC, utf8_offs); + /* load MonoString ptr */ + ARM_LDR_IMM(p, ARMREG_A1, ARMREG_SP, utf8_stack_offs); + /* call free function */ + ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); + ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R2); + } + } + + + p = arm_emit_std_epilogue(p, stack_size + str_args*sizeof(gpointer), + /* restore R4-R7 */ + (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7)); + + flush_icache(); + +#ifdef ARM_DUMP_DISASM + _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff)); +#endif + + return code_buff; +} + + + +#define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member) + + +/* + * Returns a pointer to a native function that can be used to + * call the specified method. + * The function created will receive the arguments according + * to the call convention specified in the method. + * This function works by creating a MonoInvocation structure, + * filling the fields in and calling ves_exec_method on it. + * Still need to figure out how to handle the exception stuff + * across the managed/unmanaged boundary. + */ +void* mono_create_method_pointer (MonoMethod* method) +{ + MonoMethodSignature* sig; + guchar* p, * p_method, * p_stackval_from_data, * p_exec; + void* code_buff; + int i, stack_size, arg_pos, arg_add, stackval_pos, offs; + int areg, reg_args, shift, pos; + + code_buff = alloc_code_buff(128); + p = (guchar*)code_buff; + + sig = method->signature; + + ARM_B(p, 3); + + /* embed magic number followed by method pointer */ + *p++ = 'M'; + *p++ = 'o'; + *p++ = 'n'; + *p++ = 'o'; + /* method ptr */ + *(void**)p = method; + p_method = p; + p += 4; + + /* call table */ + *(void**)p = stackval_from_data; + p_stackval_from_data = p; + p += 4; + *(void**)p = ves_exec_method; + p_exec = p; + p += 4; + + + stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t); + + /* prologue */ + p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size, + (1 << ARMREG_R4) | + (1 << ARMREG_R5) | + (1 << ARMREG_R6) | + (1 << ARMREG_R7)); + + /* R7 - ptr to stack args */ + ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP); + + /* + * Initialize MonoInvocation fields, first the ones known now. + */ + ARM_MOV_REG_IMM8(p, ARMREG_R4, 0); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex)); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler)); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(child)); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent)); + + /* Set the method pointer. 
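+	 * It is loaded PC-relative from the word embedded after the magic
+	 * number; since the ARM PC reads two instructions (8 bytes) ahead
+	 * of the executing load, the displacement works out to
+	 *   p_method - (p + 8) == -(int)(p - p_method + sizeof(arminstr_t)*2).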
*/ + ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2)); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method)); + + if (sig->hasthis) { + /* [this] in A1 */ + ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj)); + } else { + /* else set minv.obj to NULL */ + ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj)); + } + + /* copy args from registers to stack */ + areg = ARMREG_A1 + sig->hasthis; + arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t); + arg_add = 0; + for (i = 0; i < sig->param_count; ++i) { + if (areg >= ARM_NUM_ARG_REGS) break; + ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos); + ++areg; + if (!sig->params[i]->byref) { + switch (sig->params[i]->type) { + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + if (areg >= ARM_NUM_ARG_REGS) { + /* load second half of 64-bit arg */ + ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0); + ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t)); + arg_add = sizeof(armword_t); + } else { + /* second half is already the register */ + ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t)); + ++areg; + } + break; + case MONO_TYPE_VALUETYPE: + /* assert */ + default: + break; + } + } + arg_pos += 2 * sizeof(armword_t); + } + /* number of args passed in registers */ + reg_args = i; + + + + /* + * Calc and save stack args ptr, + * args follow MonoInvocation struct on the stack. + */ + ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation)); + ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args)); + + /* convert method args to stackvals */ + arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t); + stackval_pos = sizeof(MonoInvocation); + for (i = 0; i < sig->param_count; ++i) { + if (i < reg_args) { + ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos); + arg_pos += 2 * sizeof(armword_t); + } else { + if (arg_pos < 0) arg_pos = 0; + pos = arg_pos + arg_add; + if (pos <= 0xFF) { + ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos); + } else { + if (is_arm_const((armword_t)pos)) { + shift = calc_arm_mov_const_shift((armword_t)pos); + ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1); + } else { + p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos); + ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_R7, ARMREG_R6); + } + } + arg_pos += sizeof(armword_t); + if (!sig->params[i]->byref) { + switch (sig->params[i]->type) { + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + arg_pos += sizeof(armword_t); + break; + case MONO_TYPE_VALUETYPE: + /* assert */ + default: + break; + } + } + } + + /* A2 = result */ + if (stackval_pos <= 0xFF) { + ARM_ADD_REG_IMM8(p, ARMREG_A2, ARMREG_SP, stackval_pos); + } else { + if (is_arm_const((armword_t)stackval_pos)) { + shift = calc_arm_mov_const_shift((armword_t)stackval_pos); + ARM_ADD_REG_IMM(p, ARMREG_A2, ARMREG_SP, stackval_pos >> ((32 - shift) & 31), shift >> 1); + } else { + p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)stackval_pos); + ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_SP, ARMREG_R6); + } + } + + /* A1 = type */ + p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_A1, (armword_t)sig->params [i]); + + stackval_pos += ARG_SIZE; + + offs = -(p + 2*sizeof(arminstr_t) - p_stackval_from_data); + /* load function address */ + ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs); + /* call stackval_from_data */ + ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); + ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4); + } + + /* store retval ptr */ + p = 
(guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R5, (armword_t)stackval_pos); + ARM_ADD_REG_REG(p, ARMREG_R5, ARMREG_SP, ARMREG_R4); + ARM_STR_IMM(p, ARMREG_R5, ARMREG_SP, MINV_OFFS(retval)); + + /* + * Call the method. + */ + /* A1 = MonoInvocation ptr */ + ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_SP); + offs = -(p + 2*sizeof(arminstr_t) - p_exec); + /* load function address */ + ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs); + /* call ves_exec */ + ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); + ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4); + + + /* + * Move retval into reg. + */ + if (sig->ret->byref) { + ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0); + } else { + switch (sig->ret->type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + ARM_LDRB_IMM(p, ARMREG_R0, ARMREG_R5, 0); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R5, 0); + break; + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_R4: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_ARRAY: + ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0); + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: + ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0); + ARM_LDR_IMM(p, ARMREG_R1, ARMREG_R5, 4); + break; + case MONO_TYPE_VOID: + default: + break; + } + } + + + p = (guchar*)arm_emit_std_epilogue((arminstr_t*)p, stack_size, + (1 << ARMREG_R4) | + (1 << ARMREG_R5) | + (1 << ARMREG_R6) | + (1 << ARMREG_R7)); + + flush_icache(); + +#ifdef ARM_DUMP_DISASM + _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff)); +#endif + + return code_buff; +} + + +/* + * mono_create_method_pointer () will insert a pointer to the MonoMethod + * so that the interp can easily get at the data: this function will retrieve + * the method from the code stream. + */ +MonoMethod* mono_method_pointer_get (void* code) +{ + unsigned char* c = code; + /* check out magic number that follows unconditional branch */ + if (c[4] == 'M' && + c[5] == 'o' && + c[6] == 'n' && + c[7] == 'o') return ((MonoMethod**)code)[2]; + return NULL; +} + -- cgit v1.1 From f107fb14e6c183972bec81e5727381f44c6a5333 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sun, 24 Feb 2002 20:46:13 +0000 Subject: (mono_create_method_pointer): implements delegates with parameters and return value svn path=/trunk/mono/; revision=2618 --- ChangeLog | 2 + ppc/tramp.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 113 insertions(+), 33 deletions(-) diff --git a/ChangeLog b/ChangeLog index e503aa9..cc3c979 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,8 @@ implementation, it works for simple delegates now and I am already pretty close to have it working for every delegates, but I am going to sleep and finish it tomorrow? 
+ (mono_create_method_pointer): implements delegates with parameters + and return value 2002-02-22 Jeffrey Stedfast diff --git a/ppc/tramp.c b/ppc/tramp.c index 634d581..0e01296 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -600,10 +600,12 @@ mono_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; guint8 *p, *code_buffer; - guint code_size, stack_size, stackval_arg_pos; + guint i, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param; + guint32 simpletype; code_size = 512; stack_size = 512; + stack_param = 0; sig = method->signature; @@ -639,9 +641,18 @@ mono_create_method_pointer (MonoMethod *method) ppc_ori (p, ppc_r0, ppc_r0, (guint32) method & 0xffff); ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), ppc_r31); + local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval); + if (sig->hasthis) { ppc_stw (p, ppc_r3, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), ppc_r31); + reg_param = 1; + } else { + ppc_stw (p, ppc_r3, local_pos, ppc_r31); + local_pos += 4; + reg_param = 0; } + ppc_stw (p, ppc_r4, local_pos, ppc_r31); local_pos += 4; + ppc_stw (p, ppc_r5, local_pos, ppc_r31); local_pos += 4; /* set MonoInvocation::stack_args */ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); @@ -649,38 +660,71 @@ mono_create_method_pointer (MonoMethod *method) ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)), ppc_r31); /* add stackval arguments */ - /* for (i = 0; i < sig->param_count; ++i) { - - - - ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) stackval_from_data & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - - x86_mov_reg_imm (p, X86_ECX, stackval_from_data); - x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); - x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); - x86_push_reg (p, X86_EDX); - x86_push_reg (p, X86_EAX); - x86_push_imm (p, sig->params [i]); - x86_call_reg (p, X86_ECX); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 12); - stackval_pos += sizeof (stackval); - arg_pos += 4; - if (!sig->params [i]->byref) { - switch (sig->params [i]->type) { - case MONO_TYPE_I8: - case MONO_TYPE_R8: - arg_pos += 4; - break; - case MONO_TYPE_VALUETYPE: - g_assert_not_reached (); Not implemented yet. - default: - break; - } + for (i = 0; i < sig->param_count; ++i) { +#define CALL_STACKVAL_FROM_DATA \ + ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); \ + ppc_ori (p, ppc_r0, ppc_r0, (guint32) stackval_from_data & 0xffff); \ + ppc_mtlr (p, ppc_r0); \ + ppc_blrl (p) +#define CALL_SIZE_4 \ + if (reg_param < 3 - (sig->hasthis ? 1 : 0)) { \ + ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - (sig->hasthis ? 
1 : 0))*4); \ + reg_param ++; \ + } else if (reg_param < 8) { \ + ppc_stw (p, ppc_r3 + reg_param, local_pos, ppc_r31); \ + ppc_addi (p, ppc_r5, ppc_r31, local_pos); \ + reg_param ++; \ + } else { \ + ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); \ + stack_param ++; \ + } \ + ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); \ + ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); \ + stackval_arg_pos ++; \ + ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); \ +\ + CALL_STACKVAL_FROM_DATA + + if (sig->params [i]->byref) { + CALL_SIZE_4; + continue; + } + simpletype = sig->params [i]->type; + enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + CALL_SIZE_4; + break; + case MONO_TYPE_VALUETYPE: + NOT_IMPLEMENTED ("value type"); + break; + case MONO_TYPE_I8: + NOT_IMPLEMENTED ("i8"); + break; + case MONO_TYPE_R4: + NOT_IMPLEMENTED ("r4"); + break; + case MONO_TYPE_R8: + NOT_IMPLEMENTED ("r8"); + break; + default: + g_error ("Can't delegate 0x%x type", sig->params [i]->type); } - } */ + } /* return value storage */ if (sig->param_count) { @@ -696,7 +740,41 @@ mono_create_method_pointer (MonoMethod *method) ppc_blrl (p); /* move retval from stackval to proper place (r3/r4/...) */ - /* TODO */ + if (sig->ret->byref) { + ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); + } else { + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); + break; + case MONO_TYPE_I8: + ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); + ppc_lwz (p, ppc_r4, stackval_arg_pos + 1, ppc_r31); + break; + case MONO_TYPE_R4: + ppc_lfs (p, ppc_f1, stackval_arg_pos, ppc_r31); + break; + case MONO_TYPE_R8: + ppc_lfd (p, ppc_f1, stackval_arg_pos, ppc_r31); + break; + default: + g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); + break; + } + } /* epilog */ ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ -- cgit v1.1 From f703ca24db3d380b37434e9f1cced6d0b45a5470 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Mon, 25 Feb 2002 08:56:57 +0000 Subject: * Makefile.am: added arm to DIST_SUBDIRS. svn path=/trunk/mono/; revision=2627 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index fc76039..5f14ee9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc +DIST_SUBDIRS = x86 ppc sparc arm INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From a8b6a875977b2728019ea7cf2ea8dd432fe4469a Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Mon, 25 Feb 2002 08:58:43 +0000 Subject: * ChangeLog: ARM-related log entry. svn path=/trunk/mono/; revision=2628 --- ChangeLog | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index cc3c979..f42d38e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-02-25 Sergey Chaban + + * arm: added ARM support code. 
+ * Makefile.am: added arm to DIST_SUBDIRS. + 2002-02-24 Radek Doulik * ppc/tramp.c (mono_create_method_pointer): basic delegates -- cgit v1.1 From 29f73f5799fb9274a44c918cb4f63c606f765b96 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Wed, 27 Feb 2002 09:12:27 +0000 Subject: * Makefile.am: removed SCRIPT_SOURCES to fix automake issues. svn path=/trunk/mono/; revision=2710 --- arm/Makefile.am | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arm/Makefile.am b/arm/Makefile.am index 851c436..8608db7 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -5,7 +5,6 @@ noinst_LTLIBRARIES = libmonoarch-arm.la BUILT_SOURCES = arm_dpimacros.h -SCRIPT_SOURCES = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ tramp.c \ @@ -15,10 +14,10 @@ libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ arm-dis.h \ # -arm_dpimacros.h: $(SCRIPT_SOURCES) +arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th bash ./dpiops.sh CLEANFILES = $(BUILT_SOURCES) -EXTRA_DIST = $(SCRIPT_SOURCES) +EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th -- cgit v1.1 From d0370e0ab841b63f60170f3afcae9ee49e9faade Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 28 Feb 2002 07:43:49 +0000 Subject: Thu Feb 28 12:34:21 CET 2002 Paolo Molaro * x86/tramp.c: start handling of more complex marshaling stuff. Thu Feb 28 12:33:41 CET 2002 Paolo Molaro * marshal.c, marshal.h: start of marshaling interface. svn path=/trunk/mono/; revision=2759 --- ChangeLog | 5 +++++ x86/tramp.c | 50 +++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/ChangeLog b/ChangeLog index f42d38e..da4892b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Thu Feb 28 12:34:21 CET 2002 Paolo Molaro + + * x86/tramp.c: start handling of more complex marshaling stuff. + 2002-02-25 Sergey Chaban * arm: added ARM support code. diff --git a/x86/tramp.c b/x86/tramp.c index 8c861cd..25aa7f0 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -14,6 +14,7 @@ #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" +#include "mono/metadata/marshal.h" /* * The resulting function takes the form: @@ -43,6 +44,13 @@ mono_create_trampoline (MonoMethod *method, int runtime) guint32 local_size = 0, stack_size = 0, code_size = 30; guint32 arg_pos, simpletype; int i, stringp; + int need_marshal; + GList *free_locs = NULL; + + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) + need_marshal = 0; + else + need_marshal = 1; sig = method->signature; @@ -54,7 +62,8 @@ mono_create_trampoline (MonoMethod *method, int runtime) for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { stack_size += sizeof (gpointer); - code_size += i < 10 ? 
5 : 8; + code_size += 20; + local_size++; continue; } simpletype = sig->params [i]->type; @@ -141,7 +150,31 @@ enum_calc_size: for (i = sig->param_count; i; --i) { arg_pos = ARG_SIZE * (i - 1); if (sig->params [i - 1]->byref) { - x86_push_membase (p, X86_EDX, arg_pos); + if (!need_marshal) { + x86_push_membase (p, X86_EDX, arg_pos); + continue; + } + if (sig->params [i - 1]->type == MONO_TYPE_SZARRAY && + sig->params [i - 1]->data.type->type == MONO_TYPE_STRING) { + x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_push_regp (p, X86_EAX); + x86_mov_reg_imm (p, X86_EDX, mono_marshal_string_array); + x86_call_reg (p, X86_EDX); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); + /* + * Store the pointer in a local we'll free later. + */ + stringp++; + x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); + free_locs = g_list_prepend (free_locs, GUINT_TO_POINTER (LOC_POS * stringp)); + /* load the pointer and push it */ + x86_lea_membase (p, X86_EAX, X86_EBP, LOC_POS * stringp); + x86_push_reg (p, X86_EAX); + /* restore pointer to args in EDX */ + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); + } else { + x86_push_membase (p, X86_EDX, arg_pos); + } continue; } simpletype = sig->params [i - 1]->type; @@ -185,7 +218,7 @@ enum_marshal: * If it is an internalcall we assume it's the object we want. * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. */ - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + if (!need_marshal) { x86_push_membase (p, X86_EDX, arg_pos); break; } @@ -200,6 +233,7 @@ enum_marshal: */ stringp++; x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); + free_locs = g_list_prepend (free_locs, GUINT_TO_POINTER (LOC_POS * stringp)); /* * we didn't save the reg: restore it here. */ @@ -267,7 +301,7 @@ enum_retvalue: x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + if (!need_marshal) { x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; @@ -312,12 +346,14 @@ enum_retvalue: /* * free the allocated strings. */ - if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { - for (i = 1; i <= local_size; ++i) { + if (need_marshal) { + GList* tmp; + for (tmp = free_locs; tmp; tmp = tmp->next) { x86_mov_reg_imm (p, X86_EDX, g_free); - x86_push_membase (p, X86_EBP, LOC_POS * i); + x86_push_membase (p, X86_EBP, GPOINTER_TO_UINT (tmp->data)); x86_call_reg (p, X86_EDX); } + g_list_free (free_locs); } /* * Standard epilog. -- cgit v1.1 From 51d24bbb570af055b885dfe9f06e7717e4bb3b98 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 28 Feb 2002 09:35:29 +0000 Subject: impl. more CONV opcodes svn path=/trunk/mono/; revision=2761 --- x86/x86-codegen.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index b57e50e..a50a103 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.22 2002/02/11 07:42:10 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.23 2002/02/28 09:35:28 dietmar Exp $ */ #ifndef X86_H @@ -1036,7 +1036,7 @@ typedef union { x86_membase_emit ((inst), 0, (basereg), (disp)); \ } while (0) -#define x86_fld80(inst,mem) \ +#define x86_fld80_mem(inst,mem) \ do { \ *(inst)++ = (unsigned char)0xdb; \ x86_mem_emit ((inst), 5, (mem)); \ @@ -1100,6 +1100,20 @@ typedef union { x86_membase_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (basereg), (disp)); \ } while (0) +#define x86_fst80_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 7, (mem)); \ + } while (0) + + +#define x86_fst80_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } while (0) + + #define x86_fist_pop(inst,mem,is_long) \ do { \ if ((is_long)) { \ -- cgit v1.1 From 670be867554bb6f1ed61a17649e21d0e25f66105 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 11 Mar 2002 11:24:33 +0000 Subject: Mon Mar 11 16:14:29 CET 2002 Paolo Molaro * x86/x86-codegen.h: addex x86_clear_reg() and changed x86_mov_reg_imm() to not check for imm == 0. svn path=/trunk/mono/; revision=3051 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 15 ++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/ChangeLog b/ChangeLog index da4892b..b40afa1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Mar 11 16:14:29 CET 2002 Paolo Molaro + + * x86/x86-codegen.h: addex x86_clear_reg() and changed + x86_mov_reg_imm() to not check for imm == 0. + Thu Feb 28 12:34:21 CET 2002 Paolo Molaro * x86/tramp.c: start handling of more complex marshaling stuff. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index a50a103..461bef0 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.23 2002/02/28 09:35:28 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.24 2002/03/11 11:24:33 lupus Exp $ */ #ifndef X86_H @@ -814,14 +814,15 @@ typedef union { x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ } while (0) +/* + * Note: x86_clear_reg () chacnges the condition code! + */ +#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg)) + #define x86_mov_reg_imm(inst,reg,imm) \ do { \ - if ((imm) == 0) { \ - x86_alu_reg_reg ((inst), X86_XOR, (reg), (reg)); \ - } else { \ - *(inst)++ = (unsigned char)0xb8 + (reg); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ + *(inst)++ = (unsigned char)0xb8 + (reg); \ + x86_imm_emit32 ((inst), (imm)); \ } while (0) #define x86_mov_mem_imm(inst,mem,imm,size) \ -- cgit v1.1 From af361d9d30702937e3cd9412b987552f4652887a Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 14 Mar 2002 09:52:53 +0000 Subject: 2002-03-14 Dietmar Maurer * emit-x86.c (arch_create_native_wrapper): new code to generate wrappers for calling native functions. * icall.c (ves_icall_InternalInvoke): impl. 
svn path=/trunk/mono/; revision=3103 --- ChangeLog | 4 ++++ x86/tramp.c | 17 +++-------------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/ChangeLog b/ChangeLog index b40afa1..e1afd2f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-03-14 Dietmar Maurer + + * x86/tramp.c (mono_create_trampoline): dont use fld/fst to copy + R8 values Mon Mar 11 16:14:29 CET 2002 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index 25aa7f0..794e0ad 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -28,14 +28,6 @@ #define ARG_SIZE sizeof (stackval) -static MonoString* -mono_string_new_wrapper (const char *text) -{ - MonoDomain *domain = mono_domain_get (); - - return mono_string_new (domain, text); -} - MonoPIFunc mono_create_trampoline (MonoMethod *method, int runtime) { @@ -155,7 +147,7 @@ enum_calc_size: continue; } if (sig->params [i - 1]->type == MONO_TYPE_SZARRAY && - sig->params [i - 1]->data.type->type == MONO_TYPE_STRING) { + sig->params [i - 1]->data.type->type == MONO_TYPE_STRING) { x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); x86_push_regp (p, X86_EAX); x86_mov_reg_imm (p, X86_EDX, mono_marshal_string_array); @@ -208,11 +200,6 @@ enum_marshal: goto enum_marshal; } break; - case MONO_TYPE_R8: - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 8); - x86_fld_membase (p, X86_EDX, arg_pos, TRUE); - x86_fst_membase (p, X86_ESP, 0, TRUE, TRUE); - break; case MONO_TYPE_STRING: /* * If it is an internalcall we assume it's the object we want. @@ -241,6 +228,8 @@ enum_marshal: x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); break; case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_R8: x86_push_membase (p, X86_EDX, arg_pos + 4); x86_push_membase (p, X86_EDX, arg_pos); break; -- cgit v1.1 From 793cfcbae98d4847ff08aff44ffa27020260c317 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 16 Mar 2002 14:37:28 +0000 Subject: Sat Mar 16 19:12:57 CET 2002 Paolo Molaro * x86/tramp.c: increase default allocated size for trampolines and assert on overflow. svn path=/trunk/mono/; revision=3143 --- ChangeLog | 6 ++++++ x86/tramp.c | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index e1afd2f..48b847d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ + +Sat Mar 16 19:12:57 CET 2002 Paolo Molaro + + * x86/tramp.c: increase default allocated size for trampolines + and assert on overflow. + 2002-03-14 Dietmar Maurer * x86/tramp.c (mono_create_trampoline): dont use fld/fst to copy diff --git a/x86/tramp.c b/x86/tramp.c index 794e0ad..620aad2 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -33,7 +33,7 @@ mono_create_trampoline (MonoMethod *method, int runtime) { MonoMethodSignature *sig; unsigned char *p, *code_buffer; - guint32 local_size = 0, stack_size = 0, code_size = 30; + guint32 local_size = 0, stack_size = 0, code_size = 50; guint32 arg_pos, simpletype; int i, stringp; int need_marshal; @@ -350,6 +350,7 @@ enum_retvalue: x86_leave (p); x86_ret (p); + g_assert (p - code_buffer < code_size); return g_memdup (code_buffer, p - code_buffer); } -- cgit v1.1 From 3f3f1e23c3cced2e37ec49361ee3236c524ed107 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sat, 30 Mar 2002 11:19:26 +0000 Subject: fixed compiler warnings svn path=/trunk/mono/; revision=3514 --- x86/x86-codegen.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 461bef0..c08df80 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. 
All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.24 2002/03/11 11:24:33 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.25 2002/03/30 11:19:25 dietmar Exp $ */ #ifndef X86_H @@ -1346,9 +1346,9 @@ typedef union { #define x86_call_code(inst,target) \ do { \ - int offset = (unsigned char*)(target) - (inst); \ - offset -= 5; \ - x86_call_imm ((inst), offset); \ + int _x86_offset = (unsigned char*)(target) - (inst); \ + _x86_offset -= 5; \ + x86_call_imm ((inst), _x86_offset); \ } while (0) #define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) -- cgit v1.1 From bf0fa05ecc5f3537597c10704414544c50d3a0ed Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 4 Apr 2002 04:42:46 +0000 Subject: Remove useless comments in rules. svn path=/trunk/mono/; revision=3595 --- arm/Makefile.am | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arm/Makefile.am b/arm/Makefile.am index 8608db7..afce5cd 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -11,8 +11,7 @@ libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ arm-codegen.c \ arm-codegen.h \ arm-dis.c \ - arm-dis.h \ - # + arm-dis.h arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th bash ./dpiops.sh -- cgit v1.1 From 9116ce23467ea863a99b860849d867802c32187a Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 6 Apr 2002 10:40:58 +0000 Subject: Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro * x86/tramp.c: fix advancement od argument position on the stack. svn path=/trunk/mono/; revision=3652 --- ChangeLog | 4 ++++ x86/tramp.c | 16 ++-------------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/ChangeLog b/ChangeLog index 48b847d..4b6b91e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix advancement od argument position on the stack. + Sat Mar 16 19:12:57 CET 2002 Paolo Molaro * x86/tramp.c: increase default allocated size for trampolines diff --git a/x86/tramp.c b/x86/tramp.c index 620aad2..74c5eba 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -376,7 +376,7 @@ mono_create_method_pointer (MonoMethod *method) unsigned char *p, *code_buffer; gint32 local_size; gint32 stackval_pos, arg_pos = 8; - int i; + int i, align; /* * If it is a static P/Invoke method, we can just return the pointer @@ -444,19 +444,7 @@ mono_create_method_pointer (MonoMethod *method) x86_call_reg (p, X86_ECX); x86_alu_reg_imm (p, X86_SUB, X86_ESP, 12); stackval_pos += sizeof (stackval); - arg_pos += 4; - if (!sig->params [i]->byref) { - switch (sig->params [i]->type) { - case MONO_TYPE_I8: - case MONO_TYPE_R8: - arg_pos += 4; - break; - case MONO_TYPE_VALUETYPE: - g_assert_not_reached (); /* Not implemented yet. */ - default: - break; - } - } + arg_pos += mono_type_stack_size (sig->params [i], &align); } /* -- cgit v1.1 From d4ccb473cf835fd07294b7da6a6d4da9e2022dcd Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 10 Apr 2002 12:34:16 +0000 Subject: Forgot to commit. svn path=/trunk/mono/; revision=3740 --- ChangeLog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 4b6b91e..cd912d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,7 +1,7 @@ Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro - * x86/tramp.c: fix advancement od argument position on the stack. + * x86/tramp.c: fix advancement of argument position on the stack. 
Sat Mar 16 19:12:57 CET 2002 Paolo Molaro -- cgit v1.1 From ab877e78de2c3ac01664dc13c13c2f231fca4c11 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Sat, 20 Apr 2002 14:32:46 +0000 Subject: 2002-04-20 Dietmar Maurer * interp.c (ves_exec_method): support internalcall String constructors svn path=/trunk/mono/; revision=3925 --- ChangeLog | 4 ++++ x86/tramp.c | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index cd912d0..3bfd8f7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-04-20 Dietmar Maurer + + * x86/tramp.c (mono_create_trampoline): support internalcall + String constructors Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index 74c5eba..baae8e9 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -259,7 +259,10 @@ enum_marshal: * Long integers are in EAX:EDX. * FP values are on the FP stack. */ - if (sig->ret->byref) { + + if (sig->ret->byref || + (method->klass == mono_defaults.string_class && + *method->name == '.' && !strcmp (method->name, ".ctor"))) { x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); } else { -- cgit v1.1 From cc03dca33b721c5b46cba47ff7a7bb80b820be6d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 22 Apr 2002 07:32:11 +0000 Subject: Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro * x86/x86-codegen.h: added loop instructions and made x86_patch fully useful. svn path=/trunk/mono/; revision=3950 --- ChangeLog | 6 ++++++ x86/test.c | 6 +++++- x86/x86-codegen.h | 56 ++++++++++++++++++++++++++++++++++++++++++++++--------- 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3bfd8f7..cf08241 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ + +Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro + + * x86/x86-codegen.h: added loop instructions and made x86_patch fully + useful. + 2002-04-20 Dietmar Maurer * x86/tramp.c (mono_create_trampoline): support internalcall diff --git a/x86/test.c b/x86/test.c index a9695dc..3511e8f 100644 --- a/x86/test.c +++ b/x86/test.c @@ -8,7 +8,7 @@ int main() { unsigned char code [16000]; unsigned char *p = code; - unsigned char *target; + unsigned char *target, *start, *end; unsigned long mem_addr = 0xdeadbeef; int size, i; @@ -174,8 +174,12 @@ int main() { target = p; + start = p; x86_jump32 (p, mem_addr); + x86_patch (start, target); + start = p; x86_jump8 (p, 12); + x86_patch (start, target); x86_jump_reg (p, X86_EAX); x86_jump_membase (p, X86_EDX, 16); diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index c08df80..e824e48 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.25 2002/03/30 11:19:25 dietmar Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.26 2002/04/22 07:32:11 lupus Exp $ */ #ifndef X86_H @@ -260,15 +260,33 @@ typedef union { } \ } while (0) -/* disp will need to be relative to the start position... */ -#define x86_patch(ins,disp) \ +/* + * target is the position in the code where to jump to: + * target = code; + * .. output loop code... + * x86_mov_reg_imm (code, X86_EAX, 0); + * loop = code; + * x86_loop (code, -1); + * ... finish method + * + * patch displacement + * x86_patch (loop, target); + * + * ins should point at the start of the instruction that encodes a target. 
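+ * (for a 32-bit conditional branch that means the 0x0f prefix byte
+ * itself, not the opcode byte that follows it);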
+ * the instruction is inspected for validity and the correct displacement + * is inserted. + */ +#define x86_patch(ins,target) \ do { \ unsigned char* pos = (ins) + 1; \ - int size = 0; \ + int disp, size = 0; \ switch (*(ins)) { \ - case 0xe9: ++size; break; \ - case 0x0f: ++size; ++pos; break; \ - case 0xeb: \ + case 0xe9: ++size; break; /* jump32 */ \ + case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x7f)) assert (0); \ + ++size; ++pos; break; /* prefix for 32-bit disp */ \ + case 0xe0: case 0xe1: case 0xe2: /* loop */ \ + case 0xeb: /* jump8 */ \ + /* conditional jump opcodes */ \ case 0x70: case 0x71: case 0x72: case 0x73: \ case 0x74: case 0x75: case 0x76: case 0x77: \ case 0x78: case 0x79: case 0x7a: case 0x7b: \ @@ -276,8 +294,10 @@ typedef union { break; \ default: assert (0); \ } \ - if (size) x86_imm_emit32 (pos, (disp)); \ - else x86_imm_emit8 (pos, (disp)); \ + disp = (target) - pos; \ + if (size) x86_imm_emit32 (pos, disp - 4); \ + else if (x86_is_imm8 (disp)) x86_imm_emit8 (pos, disp - 1); \ + else assert (0); \ } while (0) #define x86_breakpoint(inst) \ @@ -1194,6 +1214,24 @@ typedef union { #define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0) #define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0) +#define x86_loop(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe2; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_loope(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe1; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_loopne(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe0; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + #define x86_jump32(inst,imm) \ do { \ *(inst)++ = (unsigned char)0xe9; \ -- cgit v1.1 From d8cf0bf0270efb923d7c6e80c4e5d547d1161740 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 29 Apr 2002 12:14:39 +0000 Subject: Removed mono_string_new_wrapper(). svn path=/trunk/mono/; revision=4151 --- arm/tramp.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arm/tramp.c b/arm/tramp.c index 0a7c47d..3e5af33 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -22,7 +22,6 @@ /* prototypes for private functions (to avoid compiler warnings) */ -MonoString* mono_string_new_wrapper (const char* text); void flush_icache (void); void* alloc_code_buff (int num_instr); @@ -45,14 +44,6 @@ void* alloc_code_buff (int num_instr); -/* - * Invokes mono_string_new for the current AppDomain. - */ -MonoString* mono_string_new_wrapper (const char* text) -{ - return text == NULL ? NULL : mono_string_new(mono_domain_get(), text); -} - void flush_icache () { -- cgit v1.1 From 944736b70eb0689f094fe05c7184d36f7b7421bf Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 3 May 2002 12:52:19 +0000 Subject: Added some missing FP opcodes and made x86_patch() handle also the call opcode. svn path=/trunk/mono/; revision=4252 --- x86/x86-codegen.h | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index e824e48..83930b1 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.26 2002/04/22 07:32:11 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.27 2002/05/03 12:52:19 lupus Exp $ */ #ifndef X86_H @@ -281,7 +281,7 @@ typedef union { unsigned char* pos = (ins) + 1; \ int disp, size = 0; \ switch (*(ins)) { \ - case 0xe9: ++size; break; /* jump32 */ \ + case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \ case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x7f)) assert (0); \ ++size; ++pos; break; /* prefix for 32-bit disp */ \ case 0xe0: case 0xe1: case 0xe2: /* loop */ \ @@ -966,6 +966,12 @@ typedef union { x86_mem_emit ((inst), (opc), (mem)); \ } while (0) +#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + } while (0) + #define x86_fp_op(inst,opc,index) \ do { \ *(inst)++ = (unsigned char)0xd8; \ @@ -991,6 +997,12 @@ typedef union { *(inst)++ = (unsigned char)0xd9; \ } while (0) +#define x86_fucompp(inst) \ + do { \ + *(inst)++ = (unsigned char)0xda; \ + *(inst)++ = (unsigned char)0xe9; \ + } while (0) + #define x86_fnstsw(inst) \ do { \ *(inst)++ = (unsigned char)0xdf; \ @@ -1039,12 +1051,30 @@ typedef union { *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \ } while (0) +#define x86_fcomi(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ + } while (0) + #define x86_fcomip(inst,index) \ do { \ *(inst)++ = (unsigned char)0xdf; \ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ } while (0) +#define x86_fucomi(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ + } while (0) + +#define x86_fucomip(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ + } while (0) + #define x86_fld(inst,mem,is_double) \ do { \ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ -- cgit v1.1 From 9d1e2b5076d08bd02eb28ad8b3f2a27a42449250 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Mon, 6 May 2002 16:33:54 +0000 Subject: * x86-codegen.h: added missing shifts; 8-bit ALU operations; FPU ops with integer operand; FIST (without pop); svn path=/trunk/mono/; revision=4343 --- x86/x86-codegen.h | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 83930b1..78b87cc 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. 
// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.27 2002/05/03 12:52:19 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.28 2002/05/06 16:33:54 serge Exp $ */ #ifndef X86_H @@ -41,6 +41,10 @@ typedef enum { typedef enum { X86_SHLD, X86_SHLR, + X86_ROL = 0, + X86_ROR = 1, + X86_RCL = 2, + X86_RCR = 3, X86_SHL = 4, X86_SHR = 5, X86_SAR = 7, @@ -208,6 +212,7 @@ typedef union { #define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1))) #define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0) +#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0) #define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0) #define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) @@ -493,6 +498,21 @@ typedef union { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) +/** + * @x86_alu_reg8_reg8: + * Supports ALU operations between two 8-bit registers. + * dreg := dreg opc reg + * X86_Reg_No enum is used to specify the registers. + * Additionally is_*_h flags are used to specify what part + * of a given 32-bit register is used - high (TRUE) or low (FALSE). + * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH + */ +#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \ + x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \ + } while (0) + #define x86_alu_reg_mem(inst,opc,reg,mem) \ do { \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ @@ -985,6 +1005,19 @@ typedef union { *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \ } while (0) +/** + * @x86_fp_int_op_mem + * Supports FPU operations between ST(0) and integer operand in memory. + * Operation encoded using X86_FP_Opcode enum. + * Operand is addressed by [basereg + disp]. + * is_int specifies whether operand is int32 (TRUE) or int16 (FALSE). + */ +#define x86_fp_int_op_mem(inst,opc,basereg,disp,is_int) \ + do { \ + *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \ + x86_membase_emit ((inst), opc, (basereg), (disp)); \ + } while (0) + #define x86_fstp(inst,index) \ do { \ *(inst)++ = (unsigned char)0xdd; \ @@ -1187,6 +1220,24 @@ typedef union { } \ } while (0) +/** + * @x86_fist_membase + * Converts content of ST(0) to integer and stores it at memory location + * addressed by [basereg + disp]. + * is_int specifies whether destination is int32 (TRUE) or int16 (FALSE). 
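 * Usage sketch: x86_fist_membase ((inst), X86_ESP, -8, TRUE) stores ST(0)
 * as a 32-bit integer at [esp-8]; unlike x86_fist_pop_membase() it leaves
 * the FPU stack depth unchanged.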
+ */ +#define x86_fist_membase(inst,basereg,disp,is_int) \ + do { \ + if ((is_int)) { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } \ + } while (0) + + #define x86_push_reg(inst,reg) \ do { \ *(inst)++ = (unsigned char)0x50 + (reg); \ -- cgit v1.1 From 512203d918c6998f9652d23301b553c2bb205788 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Mon, 6 May 2002 16:39:01 +0000 Subject: Logged changes to x86-codegen.h svn path=/trunk/mono/; revision=4344 --- ChangeLog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index cf08241..1b1f54c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2002-05-06 Sergey Chaban + + * x86/x86-codegen.h: added missing shifts; + 8-bit ALU operations (reg-reg); + macro for FPU ops with integer operand; + FIST macro (without pop); Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro -- cgit v1.1 From 5d0a1992c7fe0252457f6644198654d06ee7a19f Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 10 May 2002 07:24:08 +0000 Subject: Fix checks in x86_patch(). svn path=/trunk/mono/; revision=4473 --- x86/x86-codegen.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 78b87cc..17aff24 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.28 2002/05/06 16:33:54 serge Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.29 2002/05/10 07:24:08 lupus Exp $ */ #ifndef X86_H @@ -285,9 +285,9 @@ typedef union { do { \ unsigned char* pos = (ins) + 1; \ int disp, size = 0; \ - switch (*(ins)) { \ + switch (*(unsigned char*)(ins)) { \ case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \ - case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x7f)) assert (0); \ + case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \ ++size; ++pos; break; /* prefix for 32-bit disp */ \ case 0xe0: case 0xe1: case 0xe2: /* loop */ \ case 0xeb: /* jump8 */ \ @@ -301,7 +301,7 @@ typedef union { } \ disp = (target) - pos; \ if (size) x86_imm_emit32 (pos, disp - 4); \ - else if (x86_is_imm8 (disp)) x86_imm_emit8 (pos, disp - 1); \ + else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \ else assert (0); \ } while (0) -- cgit v1.1 From 9fb095d7866ee9963f11e3bd2dcc9b9930320ddc Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Fri, 10 May 2002 13:39:09 +0000 Subject: updated for new strings svn path=/trunk/mono/; revision=4484 --- ppc/tramp.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 0e01296..c430a93 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -167,7 +167,9 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint } } - if (sig->ret->byref) { + if (sig->ret->byref || + (method->klass == mono_defaults.string_class && + *method->name == '.' && !strcmp (method->name, ".ctor"))) { *code_size += 8; } else { simpletype = sig->ret->type; @@ -411,11 +413,11 @@ alloc_code_memory (guint code_size) return p; } -static MonoString* +/* static MonoString* mono_string_new_wrapper (const char *text) { return text ? 
mono_string_new (mono_domain_get (), text) : NULL; -} +} */ static inline guint8 * emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) @@ -428,7 +430,9 @@ emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint stack_size, gui ppc_blrl (p); /* get return value */ - if (sig->ret->byref) { + if (sig->ret->byref || + (method->klass == mono_defaults.string_class && + *method->name == '.' && !strcmp (method->name, ".ctor"))) { ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ } else { -- cgit v1.1 From 8d20a830d50aaf3f30869283332d654472f16890 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Fri, 10 May 2002 19:25:15 +0000 Subject: * x86-codegen.h: renamed FP int macro for consistency (its arg is really a membase, not mem); svn path=/trunk/mono/; revision=4500 --- x86/x86-codegen.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 17aff24..571a027 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,7 +1,7 @@ /* Copyright (C) 2000 Intel Corporation. All rights reserved. Copyright (C) 2001 Ximian, Inc. // -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.29 2002/05/10 07:24:08 lupus Exp $ +// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.30 2002/05/10 19:25:15 serge Exp $ */ #ifndef X86_H @@ -1006,13 +1006,13 @@ typedef union { } while (0) /** - * @x86_fp_int_op_mem + * @x86_fp_int_op_membase * Supports FPU operations between ST(0) and integer operand in memory. * Operation encoded using X86_FP_Opcode enum. * Operand is addressed by [basereg + disp]. * is_int specifies whether operand is int32 (TRUE) or int16 (FALSE). */ -#define x86_fp_int_op_mem(inst,opc,basereg,disp,is_int) \ +#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \ do { \ *(inst)++ = (is_int) ? 
(unsigned char)0xda : (unsigned char)0xde; \ x86_membase_emit ((inst), opc, (basereg), (disp)); \ -- cgit v1.1 From 8e8d0cf9ac1f4aa46da775bed8da214581345ddb Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Mon, 13 May 2002 17:24:04 +0000 Subject: introduced DEBUG, disabled by default svn path=/trunk/mono/; revision=4599 --- ppc/tramp.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index c430a93..5697c86 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -21,6 +21,8 @@ #endif #endif +#define DEBUG(x) + /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) { @@ -226,7 +228,7 @@ enum_retvalue: } } /* align stack size to 16 */ - printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size); + DEBUG (printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size)); *stack_size = (*stack_size + 15) & ~15; } @@ -408,7 +410,7 @@ alloc_code_memory (guint code_size) #else p = g_malloc (code_size); #endif - printf (" align: %p (%d)\n", p, (guint)p % 4); + DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4)); return p; } @@ -551,7 +553,7 @@ mono_create_trampoline (MonoMethod *method, int runtime) guint8 *p, *code_buffer; guint stack_size, code_size, strings; - printf ("\nPInvoke [start emiting] %s\n", method->name); + DEBUG (printf ("\nPInvoke [start emiting] %s\n", method->name)); calculate_sizes (method, &stack_size, &code_size, &strings, runtime); p = code_buffer = alloc_code_memory (code_size); @@ -574,10 +576,10 @@ mono_create_trampoline (MonoMethod *method, int runtime) } #endif - printf ("emited code size: %d\n", p - code_buffer); + DEBUG (printf ("emited code size: %d\n", p - code_buffer)); flush_icache (code_buffer, p - code_buffer); - printf ("PInvoke [end emiting]\n"); + DEBUG (printf ("PInvoke [end emiting]\n")); return (MonoPIFunc) code_buffer; /* return fake_func; */ @@ -615,7 +617,7 @@ mono_create_method_pointer (MonoMethod *method) p = code_buffer = g_malloc (code_size); - printf ("\nDelegate [start emiting] %s\n", method->name); + DEBUG (printf ("\nDelegate [start emiting] %s\n", method->name)); /* jump after header which consist of "Mono" + method ptr */ ppc_b (p, 3); @@ -788,10 +790,10 @@ mono_create_method_pointer (MonoMethod *method) ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ ppc_blr (p); /* return */ - printf ("emited code size: %d\n", p - code_buffer); + DEBUG (printf ("emited code size: %d\n", p - code_buffer)); flush_icache (code_buffer, p - code_buffer); - printf ("Delegate [end emiting]\n"); + DEBUG (printf ("Delegate [end emiting]\n")); return (MonoPIFunc) code_buffer; } -- cgit v1.1 From 89d436d12d5746d04d9f27d9897853f846d0500e Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Mon, 13 May 2002 19:00:42 +0000 Subject: 2002-05-13 Radek Doulik * ppc/tramp.c (emit_save_parameters): fix I8 parameters svn path=/trunk/mono/; revision=4601 --- ChangeLog | 4 ++++ ppc/tramp.c | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 1b1f54c..a66edf3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-05-13 Radek Doulik + + * ppc/tramp.c (emit_save_parameters): fix I8 parameters + 2002-05-06 Sergey Chaban * x86/x86-codegen.h: added missing shifts; diff --git a/ppc/tramp.c b/ppc/tramp.c index 5697c86..f0db45d 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -364,10 +364,9 @@ emit_save_parameters (guint8 *p, MonoMethod *method, 
guint stack_size, guint str if (gr < 7) { if (gr & 1) gr ++; - g_warning ("check endianess"); ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); gr ++; - ppc_lwz (p, ppc_r3 + gr, i*17, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr, i*16 + 4, ARG_BASE); gr ++; } else { NOT_IMPLEMENTED ("i8 on stack"); @@ -485,7 +484,6 @@ enum_retvalue: ppc_stfd (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ break; case MONO_TYPE_I8: - g_warning ("check endianess"); ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ ppc_stw (p, ppc_r4, 4, ppc_r9); /* save return value (r3) to "retval" */ -- cgit v1.1 From be70e94a20c2c1864f829122085bce03f24cc4e8 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Wed, 15 May 2002 14:19:24 +0000 Subject: fixed delegates return values svn path=/trunk/mono/; revision=4662 --- ppc/tramp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index f0db45d..72d5530 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -745,6 +745,7 @@ mono_create_method_pointer (MonoMethod *method) /* move retval from stackval to proper place (r3/r4/...) */ if (sig->ret->byref) { + DEBUG (printf ("ret by ref\n")); ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); } else { switch (sig->ret->type) { @@ -753,8 +754,12 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: + ppc_lbz (p, ppc_r3, stackval_arg_pos, ppc_r31); + break; case MONO_TYPE_I2: case MONO_TYPE_U2: + ppc_lhz (p, ppc_r3, stackval_arg_pos, ppc_r31); + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: @@ -766,7 +771,7 @@ mono_create_method_pointer (MonoMethod *method) break; case MONO_TYPE_I8: ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); - ppc_lwz (p, ppc_r4, stackval_arg_pos + 1, ppc_r31); + ppc_lwz (p, ppc_r4, stackval_arg_pos + 4, ppc_r31); break; case MONO_TYPE_R4: ppc_lfs (p, ppc_f1, stackval_arg_pos, ppc_r31); -- cgit v1.1 From 027755140cf39776018e520f7cd838e319fb9a34 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 23 May 2002 07:44:00 +0000 Subject: 2002-05-23 Dietmar Maurer * delegate.c: move the thread pool to metadata/threadpool.c, code cleanup. * threadpool.[ch]: impl. a threadpool that can be used by mint and mono. svn path=/trunk/mono/; revision=4875 --- ChangeLog | 6 ++++++ x86/tramp.c | 30 ++++++++++-------------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/ChangeLog b/ChangeLog index a66edf3..f911ef5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2002-05-23 Dietmar Maurer + + * x86/tramp.c (mono_create_method_pointer): removed the magic + trick to store the function pointer in the prolog and use the same + mechanism as in the jit. + 2002-05-13 Radek Doulik * ppc/tramp.c (emit_save_parameters): fix I8 parameters diff --git a/x86/tramp.c b/x86/tramp.c index baae8e9..c2621eb 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -376,6 +376,7 @@ void * mono_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; + MonoJitInfo *ji; unsigned char *p, *code_buffer; gint32 local_size; gint32 stackval_pos, arg_pos = 8; @@ -393,13 +394,8 @@ mono_create_method_pointer (MonoMethod *method) stackval_pos = -local_size; /* - * Standard function prolog with magic trick. + * Standard function prolog. 
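 * (The magic trick, i.e. the 'M','o' marker bytes and the embedded
 * MonoMethod pointer, is gone: the method is now registered in a
 * MonoJitInfo via mono_jit_info_table_add(), as the new tail of this
 * function shows, and the prolog reduces to the plain frame setup:
 * push %ebp; mov %esp,%ebp; sub $local_size,%esp.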
*/ - x86_jump_code (p, code_buffer + 8); - *p++ = 'M'; - *p++ = 'o'; - *(void**)p = method; - p += 4; x86_push_reg (p, X86_EBP); x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size); @@ -506,19 +502,13 @@ mono_create_method_pointer (MonoMethod *method) x86_ret (p); g_assert (p - code_buffer < 512); - return g_memdup (code_buffer, p - code_buffer); -} -/* - * mono_create_method_pointer () will insert a pointer to the MonoMethod - * so that the interp can easily get at the data: this function will retrieve - * the method from the code stream. - */ -MonoMethod* -mono_method_pointer_get (void *code) -{ - unsigned char *c = code; - if (c [2] != 'M' || c [3] != 'o') - return NULL; - return *(MonoMethod**)(c + sizeof (gpointer)); + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = g_memdup (code_buffer, p - code_buffer); + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; } -- cgit v1.1 From b0826d366f4f32c6ef772c0a9deef5a9b4157f0b Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Mon, 27 May 2002 22:56:15 +0000 Subject: Updated copyright headers to the standard template svn path=/trunk/mono/; revision=4975 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index f911ef5..6dee480 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-05-27 Miguel de Icaza + + * x86/x86-codegen.h: Set the standard header format. + 2002-05-23 Dietmar Maurer * x86/tramp.c (mono_create_method_pointer): removed the magic diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 571a027..ba64bbf 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1,8 +1,15 @@ -/* Copyright (C) 2000 Intel Corporation. All rights reserved. - Copyright (C) 2001 Ximian, Inc. -// -// $Header: /home/miguel/third-conversion/public/mono/mono/arch/x86/x86-codegen.h,v 1.30 2002/05/10 19:25:15 serge Exp $ -*/ +/* + * x86-codegen.h: Macros for generating x86 code + * + * Authors: + * Paolo Molaro (lupus@ximian.com) + * Intel Corporation (ORP Project) + * Sergey Chaban (serge@wildwestsoftware.com) + * Dietmar Maurer (dietmar@ximian.com) + * + * Copyright (C) 2000 Intel Corporation. All rights reserved. + * Copyright (C) 2001, 2002 Ximian, Inc. + */ #ifndef X86_H #define X86_H -- cgit v1.1 From 1b8d1ed7ce3e489dcf53cc2369a3d6d482d5901d Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 28 May 2002 12:23:00 +0000 Subject: 2002-05-28 Dietmar Maurer * x86.brg: impl. 
CKFINITE svn path=/trunk/mono/; revision=4988 --- x86/x86-codegen.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ba64bbf..4cb8d07 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1532,6 +1532,8 @@ typedef union { #define x86_fsin(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) #define x86_fcos(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) #define x86_fabs(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) +#define x86_ftst(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0) +#define x86_fxam(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0) #define x86_fpatan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) #define x86_fprem(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) #define x86_fprem1(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0) -- cgit v1.1 From 9fe623bf5c85da9328f895680d8688987a94427e Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 30 May 2002 11:04:53 +0000 Subject: 2002-05-30 Dietmar Maurer * x86.brg (reg): bug fix in LOCALLOC * mono.c (main): new switch --nointrinsic to disable memcpy opt. * x86.brg: added block copy/init optimizations from Serge (serge@wildwestsoftware.com) svn path=/trunk/mono/; revision=5025 --- x86/x86-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 4cb8d07..f69534b 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -320,6 +320,10 @@ typedef union { #define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0) #define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0) #define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0) +#define x86_stosd(inst) x86_stosl((inst)) +#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0) +#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) +#define x86_movsd(inst) x86_movsl((inst)) #define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) -- cgit v1.1 From 0c268fdddc804751bba57401c02b139368f7a01c Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 31 May 2002 10:55:37 +0000 Subject: Compilation fixes. 
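An aside on the block copy/init macros added above: combined with x86_prefix() they form the classic inlined copy idiom. A minimal sketch, assuming the caller has already loaded EDI (dest), ESI (src) and ECX (dword count), with `p' as the emission cursor; 0xF3 is the REP prefix byte:

    x86_cld (p);           /* clear direction flag: copy forward      */
    x86_prefix (p, 0xF3);  /* REP                                     */
    x86_movsl (p);         /* rep movsd: copies ECX dwords ESI -> EDI */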
svn path=/trunk/mono/; revision=5054 --- sparc/tramp.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sparc/tramp.c b/sparc/tramp.c index 8bc08ea..f628fa0 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -29,14 +29,16 @@ static void fake_func (void (*callme)(gpointer, gpointer), stackval *retval, void *this_obj, stackval *arguments) { - //*(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); - //*(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); + /* + *(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); + *(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); + */ /* internal_from_handle() */ /* return (gpointer)(*callme) (((MonoType *)arguments [0].data.p)->data.klass); */ /* InitializeArray() */ - return (*callme) (arguments [0].data.p, arguments [1].data.p); + (*callme) (arguments [0].data.p, arguments [1].data.p); } static const char * -- cgit v1.1 From 5ff6eebba3bc5e1662b84a34a276d6842e41ab87 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 1 Jun 2002 08:08:34 +0000 Subject: Kill warning. svn path=/trunk/mono/; revision=5075 --- x86/tramp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/x86/tramp.c b/x86/tramp.c index c2621eb..1570361 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -9,6 +9,7 @@ #include "config.h" #include +#include #include "x86-codegen.h" #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" -- cgit v1.1 From 02476784232f22f91e347750c3fb8018d770d057 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 18 Jun 2002 04:38:23 +0000 Subject: Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro * x86/tramp.c: marshal simple arrays correctly. svn path=/trunk/mono/; revision=5316 --- ChangeLog | 5 +++++ x86/tramp.c | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 6dee480..4f98ea1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro + + * x86/tramp.c: marshal simple arrays correctly. + 2002-05-27 Miguel de Icaza * x86/x86-codegen.h: Set the standard header format. 
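The SZARRAY fix in the diff below makes the trampoline hand native code the raw element vector instead of the MonoArray object. Annotated, the emitted sequence is:

    x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); /* load the MonoArray* argument */
    x86_alu_reg_imm (p, X86_ADD, X86_EAX,
                     G_STRUCT_OFFSET (MonoArray, vector)); /* advance to &array->vector    */
    x86_push_reg (p, X86_EAX);                             /* push the raw element pointer */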
diff --git a/x86/tramp.c b/x86/tramp.c index 1570361..a08e743 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -184,12 +184,20 @@ enum_marshal: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_R4: x86_push_membase (p, X86_EDX, arg_pos); break; + case MONO_TYPE_SZARRAY: + if (need_marshal) { + x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_alu_reg_imm (p, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoArray, vector)); + x86_push_reg (p, X86_EAX); + } else { + x86_push_membase (p, X86_EDX, arg_pos); + } + break; case MONO_TYPE_VALUETYPE: if (!sig->params [i - 1]->data.klass->enumtype) { /* it's a structure that fits in 4 bytes, need to push the value pointed to */ -- cgit v1.1 From ef9afb744f4679c465be380b4285928fff50db5e Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sat, 6 Jul 2002 01:41:14 +0000 Subject: 2002-07-05 Radek Doulik * ppc/tramp.c: removed magic hack svn path=/trunk/mono/; revision=5614 --- ChangeLog | 3 +++ ppc/tramp.c | 33 +++++++++------------------------ 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4f98ea1..becdf94 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2002-07-05 Radek Doulik + + * ppc/tramp.c: removed magic hack Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro diff --git a/ppc/tramp.c b/ppc/tramp.c index 72d5530..eb0fd3a 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -603,12 +603,13 @@ void * mono_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; + MonoJitInfo *ji; guint8 *p, *code_buffer; guint i, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param; guint32 simpletype; - code_size = 512; - stack_size = 512; + code_size = 1024; + stack_size = 1024; stack_param = 0; sig = method->signature; @@ -617,14 +618,6 @@ mono_create_method_pointer (MonoMethod *method) DEBUG (printf ("\nDelegate [start emiting] %s\n", method->name)); - /* jump after header which consist of "Mono" + method ptr */ - ppc_b (p, 3); - *p = 'M'; p ++; - *p = 'o'; p ++; - *p = 'n'; p ++; - *p = 'o'; p ++; - *(void **) p = method; p += 4; - /* prolog */ ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ ppc_mflr (p, ppc_r0); /* r0 <--- LR */ @@ -798,20 +791,12 @@ mono_create_method_pointer (MonoMethod *method) DEBUG (printf ("Delegate [end emiting]\n")); - return (MonoPIFunc) code_buffer; -} + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = code_buffer; + mono_jit_info_table_add (mono_root_domain, ji); -/* - * mono_create_method_pointer () will insert a pointer to the MonoMethod - * so that the interp can easily get at the data: this function will retrieve - * the method from the code stream. 
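 * (This in-stream lookup scheme is superseded by the MonoJitInfo
 * registration emitted just above.)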
- */ -MonoMethod* -mono_method_pointer_get (void *code) -{ - unsigned char *c = code; - if (c [4] != 'M' || c [5] != 'o' || c [6] != 'n' || c [7] != 'o') - return NULL; - return *(MonoMethod**)(c + 8); + return ji->code_start; } -- cgit v1.1 From 2b677a332d7e811ca9cc75d271d069787f0495c1 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Mon, 8 Jul 2002 16:13:36 +0000 Subject: 2002-07-08 Radek Doulik * ppc/tramp.c: marshaling for SZARRAY svn path=/trunk/mono/; revision=5650 --- ChangeLog | 4 ++++ ppc/tramp.c | 22 ++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index becdf94..5260f79 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-07-08 Radek Doulik + + * ppc/tramp.c: marshaling for SZARRAY + 2002-07-05 Radek Doulik * ppc/tramp.c: removed magic hack diff --git a/ppc/tramp.c b/ppc/tramp.c index eb0fd3a..17a90b3 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -127,11 +127,16 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: add_general (&gr, stack_size, code_size, TRUE); break; + case MONO_TYPE_SZARRAY: + add_general (&gr, stack_size, code_size, TRUE); + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) + break; + *code_size += 4; + break; case MONO_TYPE_VALUETYPE: if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; @@ -327,11 +332,24 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: SAVE_4_IN_GENERIC_REGISTER; break; + case MONO_TYPE_SZARRAY: + if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { + SAVE_4_IN_GENERIC_REGISTER; + } else { + g_warning ("untested marshaling\n"); + if (gr < GENERAL_REGS) { + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr, G_STRUCT_OFFSET (MonoArray, vector), ppc_r3 + gr); + gr ++; + } else { + NOT_IMPLEMENTED ("save marshalled SZARRAY on stack"); + } + } + break; case MONO_TYPE_VALUETYPE: if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; -- cgit v1.1 From ebf4ad275e84a3887798ac765bdc1f0ed457cd5a Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 19 Jul 2002 12:21:01 +0000 Subject: Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro * x86/tramp.c: fix float loads. Simple delegate marshaling fix. svn path=/trunk/mono/; revision=5909 --- ChangeLog | 5 +++++ x86/tramp.c | 20 ++++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5260f79..f0d8714 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix float loads. Simple delegate marshaling fix. 
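The float-load fix in the x86/tramp.c diff below deserves a note: the interpreter's stackval evidently holds an R4 argument widened to a double, so pushing its raw first four bytes passes garbage. The corrected sequence narrows it through the FPU (p, X86_EDX and arg_pos as in the diff):

    x86_alu_reg_imm (p, X86_SUB, X86_ESP, 4);     /* reserve the 4-byte argument slot */
    x86_fld_membase (p, X86_EDX, arg_pos, TRUE);  /* load the stackval as a double    */
    x86_fst_membase (p, X86_ESP, 0, FALSE, TRUE); /* store as float and pop ST(0)     */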
+ 2002-07-08 Radek Doulik * ppc/tramp.c: marshaling for SZARRAY diff --git a/x86/tramp.c b/x86/tramp.c index a08e743..bd7acf6 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -184,11 +184,27 @@ enum_marshal: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: - case MONO_TYPE_R4: x86_push_membase (p, X86_EDX, arg_pos); break; + case MONO_TYPE_R4: + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 4); + x86_fld_membase (p, X86_EDX, arg_pos, TRUE); + x86_fst_membase (p, X86_ESP, 0, FALSE, TRUE); + break; + case MONO_TYPE_CLASS: + if (need_marshal) { + if (sig->params [i - 1]->data.klass->delegate) { + /* should we use a wrapper to invoke the multicast delegates? */ + x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_alu_reg_imm (p, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr)); + x86_push_reg (p, X86_EAX); + } else + g_error ("unhandled case"); + } else { + x86_push_membase (p, X86_EDX, arg_pos); + } + break; case MONO_TYPE_SZARRAY: if (need_marshal) { x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); -- cgit v1.1 From 87f9fd554284e9d2037c8757a4211cf710a85ac0 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 31 Jul 2002 11:00:53 +0000 Subject: 2002-07-31 Dietmar Maurer * interp.c: use the new marshaling code. better delegate/remoting support. * debug-helpers.c (mono_method_full_name): only print a number to indicate wrapper type (so that the output is more readable in traces). * x86/tramp.c: remove code to handle PInvoke because this is no longer needed. svn path=/trunk/mono/; revision=6278 --- ChangeLog | 4 ++ x86/tramp.c | 175 ++++++++++++++++++++---------------------------------------- 2 files changed, 62 insertions(+), 117 deletions(-) diff --git a/ChangeLog b/ChangeLog index f0d8714..f47e4ee 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-07-31 Dietmar Maurer + + * x86/tramp.c: remove code to handle PInvoke because this is no + longer needed. Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index bd7acf6..217b79d 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -30,23 +30,22 @@ #define ARG_SIZE sizeof (stackval) MonoPIFunc -mono_create_trampoline (MonoMethod *method, int runtime) +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { - MonoMethodSignature *sig; unsigned char *p, *code_buffer; guint32 local_size = 0, stack_size = 0, code_size = 50; guint32 arg_pos, simpletype; int i, stringp; - int need_marshal; - GList *free_locs = NULL; + static GHashTable *cache = NULL; + MonoPIFunc res; - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) - need_marshal = 0; - else - need_marshal = 1; + if (!cache) + cache = g_hash_table_new ((GHashFunc)mono_signature_hash, + (GCompareFunc)mono_metadata_signature_equal); + + if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig))) + return res; - sig = method->signature; - if (sig->hasthis) { stack_size += sizeof (gpointer); code_size += 5; @@ -80,16 +79,22 @@ enum_calc_size: stack_size += 4; code_size += i < 10 ? 5 : 8; break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + int size; if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) - g_error ("can only marshal enums, not generic structures (size: %d)", mono_class_value_size (sig->params [i]->data.klass, NULL)); - stack_size += 4; - code_size += i < 10 ? 
5 : 8; + if ((size = mono_class_value_size (sig->params [i]->data.klass, NULL)) != 4) { + stack_size += size + 3; + stack_size &= ~3; + code_size += 32; + } else { + stack_size += 4; + code_size += i < 10 ? 5 : 8; + } break; + } case MONO_TYPE_STRING: stack_size += 4; code_size += 20; @@ -143,31 +148,7 @@ enum_calc_size: for (i = sig->param_count; i; --i) { arg_pos = ARG_SIZE * (i - 1); if (sig->params [i - 1]->byref) { - if (!need_marshal) { - x86_push_membase (p, X86_EDX, arg_pos); - continue; - } - if (sig->params [i - 1]->type == MONO_TYPE_SZARRAY && - sig->params [i - 1]->data.type->type == MONO_TYPE_STRING) { - x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_push_regp (p, X86_EAX); - x86_mov_reg_imm (p, X86_EDX, mono_marshal_string_array); - x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - /* - * Store the pointer in a local we'll free later. - */ - stringp++; - x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); - free_locs = g_list_prepend (free_locs, GUINT_TO_POINTER (LOC_POS * stringp)); - /* load the pointer and push it */ - x86_lea_membase (p, X86_EAX, X86_EBP, LOC_POS * stringp); - x86_push_reg (p, X86_EAX); - /* restore pointer to args in EDX */ - x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); - } else { - x86_push_membase (p, X86_EDX, arg_pos); - } + x86_push_membase (p, X86_EDX, arg_pos); continue; } simpletype = sig->params [i - 1]->type; @@ -193,32 +174,32 @@ enum_marshal: x86_fst_membase (p, X86_ESP, 0, FALSE, TRUE); break; case MONO_TYPE_CLASS: - if (need_marshal) { - if (sig->params [i - 1]->data.klass->delegate) { - /* should we use a wrapper to invoke the multicast delegates? */ - x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_alu_reg_imm (p, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr)); - x86_push_reg (p, X86_EAX); - } else - g_error ("unhandled case"); - } else { - x86_push_membase (p, X86_EDX, arg_pos); - } + x86_push_membase (p, X86_EDX, arg_pos); break; case MONO_TYPE_SZARRAY: - if (need_marshal) { - x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_alu_reg_imm (p, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoArray, vector)); - x86_push_reg (p, X86_EAX); - } else { - x86_push_membase (p, X86_EDX, arg_pos); - } + x86_push_membase (p, X86_EDX, arg_pos); break; case MONO_TYPE_VALUETYPE: if (!sig->params [i - 1]->data.klass->enumtype) { - /* it's a structure that fits in 4 bytes, need to push the value pointed to */ - x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_push_regp (p, X86_EAX); + int size = mono_class_value_size (sig->params [i - 1]->data.klass, NULL); + if (size == 4) { + /* it's a structure that fits in 4 bytes, need to push the value pointed to */ + x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); + x86_push_regp (p, X86_EAX); + } else { + int ss = size; + ss += 3; + ss &= ~3; + + x86_alu_reg_imm (p, X86_SUB, X86_ESP, ss); + x86_push_imm (p, size); + x86_push_membase (p, X86_EDX, arg_pos); + x86_lea_membase (p, X86_EAX, X86_ESP, 2*4); + x86_push_reg (p, X86_EAX); + x86_mov_reg_imm (p, X86_EAX, memcpy); + x86_call_reg (p, X86_EAX); + x86_alu_reg_imm (p, X86_ADD, X86_ESP, 12); + } } else { /* it's an enum value */ simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; @@ -226,31 +207,7 @@ enum_marshal: } break; case MONO_TYPE_STRING: - /* - * If it is an internalcall we assume it's the object we want. - * Yet another reason why MONO_TYPE_STRING should not be used to indicate char*. 
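 * (Re the large-VALUETYPE case above: the sequence reserves `ss' aligned
 * bytes, pushes memcpy's size and source arguments, points EAX at the
 * reserved block, which sits at [esp + 8] after those two pushes, pushes
 * it as the destination and calls memcpy; the final add of 12 pops only
 * the three memcpy arguments, leaving the struct copy on the stack as the
 * actual call argument.)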
- */ - if (!need_marshal) { - x86_push_membase (p, X86_EDX, arg_pos); - break; - } - /*if (frame->method->flags & PINVOKE_ATTRIBUTE_CHAR_SET_ANSI*/ x86_push_membase (p, X86_EDX, arg_pos); - x86_mov_reg_imm (p, X86_EDX, mono_string_to_utf8); - x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - x86_push_reg (p, X86_EAX); - /* - * Store the pointer in a local we'll free later. - */ - stringp++; - x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); - free_locs = g_list_prepend (free_locs, GUINT_TO_POINTER (LOC_POS * stringp)); - /* - * we didn't save the reg: restore it here. - */ - if (i > 1) - x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); break; case MONO_TYPE_I8: case MONO_TYPE_U8: @@ -285,14 +242,12 @@ enum_marshal: * FP values are on the FP stack. */ - if (sig->ret->byref || - (method->klass == mono_defaults.string_class && - *method->name == '.' && !strcmp (method->name, ".ctor"))) { + if (sig->ret->byref || string_ctor) { x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); } else { simpletype = sig->ret->type; -enum_retvalue: + enum_retvalue: switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: @@ -318,20 +273,6 @@ enum_retvalue: x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; case MONO_TYPE_STRING: - if (!need_marshal) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - break; - } - - /* If the argument is non-null, then convert the value back */ - x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); - x86_branch8 (p, X86_CC_EQ, 11, FALSE); - x86_push_reg (p, X86_EAX); - x86_mov_reg_imm (p, X86_EDX, mono_string_new_wrapper); - x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); break; @@ -361,25 +302,17 @@ enum_retvalue: } /* - * free the allocated strings. - */ - if (need_marshal) { - GList* tmp; - for (tmp = free_locs; tmp; tmp = tmp->next) { - x86_mov_reg_imm (p, X86_EDX, g_free); - x86_push_membase (p, X86_EBP, GPOINTER_TO_UINT (tmp->data)); - x86_call_reg (p, X86_EDX); - } - g_list_free (free_locs); - } - /* * Standard epilog. */ x86_leave (p); x86_ret (p); g_assert (p - code_buffer < code_size); - return g_memdup (code_buffer, p - code_buffer); + res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer); + + g_hash_table_insert (cache, sig, res); + + return res; } #define MINV_POS (- sizeof (MonoInvocation)) @@ -437,7 +370,6 @@ mono_create_method_pointer (MonoMethod *method) * Set the method pointer. */ x86_mov_membase_imm (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), (int)method, 4); - /* * Handle this. */ @@ -492,6 +424,8 @@ mono_create_method_pointer (MonoMethod *method) if (sig->ret->byref) { x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); } else { + int simpletype = sig->ret->type; + enum_retvalue: switch (sig->ret->type) { case MONO_TYPE_VOID: break; @@ -514,6 +448,13 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_R8: x86_fld_membase (p, X86_EAX, 0, TRUE); break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + /* do nothing ? 
*/ + break; default: g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); break; -- cgit v1.1 From 1be0ee94a17d2a4b7edb513d845d88ba5fed8285 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 31 Jul 2002 11:53:19 +0000 Subject: 2002-07-31 Dietmar Maurer * x86/tramp.c: (mono_create_method_pointer): return method->addr for pinvoke methods * interp.c (ves_exec_method): bug fix - directly jump to handle_exception. svn path=/trunk/mono/; revision=6280 --- ChangeLog | 1 + x86/tramp.c | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index f47e4ee..5b92bfd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,7 @@ * x86/tramp.c: remove code to handle PInvoke because this is no longer needed. + (mono_create_method_pointer): return method->addr for pinvoke methods Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index 217b79d..9381f36 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -344,6 +344,16 @@ mono_create_method_pointer (MonoMethod *method) * If it is a static P/Invoke method, we can just return the pointer * to the method implementation. */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + sig = method->signature; code_buffer = p = alloca (512); /* FIXME: check for overflows... */ -- cgit v1.1 From 27a4251f2a6fd091ddc8084ad14a8808c136431d Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 1 Aug 2002 06:40:11 +0000 Subject: 2002-08-01 Dietmar Maurer * interp.c (stackval_from_data): add pinvoke argument (stackval_to_data): add pinvoke argument. We need consider the fact that unmanages structures may have different sizes. * x86/tramp.c (mono_create_method_pointer): allocate space for value types. svn path=/trunk/mono/; revision=6308 --- ChangeLog | 5 +++++ x86/tramp.c | 40 +++++++++++++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5b92bfd..912248e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-08-01 Dietmar Maurer + + * x86/tramp.c (mono_create_method_pointer): allocate space for + value types. + 2002-07-31 Dietmar Maurer * x86/tramp.c: remove code to handle PInvoke because this is no diff --git a/x86/tramp.c b/x86/tramp.c index 9381f36..f9d8658 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -338,7 +338,8 @@ mono_create_method_pointer (MonoMethod *method) unsigned char *p, *code_buffer; gint32 local_size; gint32 stackval_pos, arg_pos = 8; - int i, align; + int i, size, align, cpos; + int vtbuf [sig->param_count]; /* * If it is a static P/Invoke method, we can just return the pointer @@ -359,8 +360,33 @@ mono_create_method_pointer (MonoMethod *method) code_buffer = p = alloca (512); /* FIXME: check for overflows... */ local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1); + + local_size += 7; + local_size &= ~7; + stackval_pos = -local_size; + cpos = 0; + for (i = 0; i < sig->param_count; i++) { + MonoType *type = sig->params [i]; + vtbuf [i] = -1; + if (type->type == MONO_TYPE_VALUETYPE) { + MonoClass *klass = type->data.klass; + if (klass->enumtype) + continue; + size = mono_class_native_size (klass, &align); + cpos += align - 1; + cpos &= ~(align - 1); + vtbuf [i] = cpos; + cpos += size; + } + } + + cpos += 7; + cpos &= ~7; + + local_size += cpos; + /* * Standard function prolog. 
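 * (Frame layout note: local_size now covers, besides the MonoInvocation
 * and the stackval array, one native-size scratch slot per by-value
 * struct argument at offset vtbuf[i], with the whole frame rounded up to
 * 8 bytes.)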
*/ @@ -401,16 +427,24 @@ mono_create_method_pointer (MonoMethod *method) x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)), X86_EAX, 4); for (i = 0; i < sig->param_count; ++i) { + if (vtbuf [i] >= 0) { + x86_lea_membase (p, X86_EAX, X86_EBP, - local_size + vtbuf [i]); + x86_mov_membase_reg (p, X86_EBP, stackval_pos, X86_EAX, 4); + } x86_mov_reg_imm (p, X86_ECX, stackval_from_data); x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); + x86_push_imm (p, sig->pinvoke); x86_push_reg (p, X86_EDX); x86_push_reg (p, X86_EAX); x86_push_imm (p, sig->params [i]); x86_call_reg (p, X86_ECX); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 12); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); stackval_pos += sizeof (stackval); - arg_pos += mono_type_stack_size (sig->params [i], &align); + if (sig->pinvoke) + arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + arg_pos += mono_type_stack_size (sig->params [i], &align); } /* -- cgit v1.1 From fbb833e1937ec3e3183bd1219e0f2391faa62718 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Thu, 1 Aug 2002 14:17:18 +0000 Subject: 2002-08-01 Dietmar Maurer * x86/tramp.c (mono_create_trampoline): also push the value type pointer for methods returning value types. (mono_create_method_pointer): support valuetype returns. * interp.c (ves_pinvoke_method): do not call stackval_from_data if the result is a value type. svn path=/trunk/mono/; revision=6311 --- ChangeLog | 3 +++ x86/tramp.c | 43 ++++++++++++++++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index 912248e..a884ca0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,9 @@ * x86/tramp.c (mono_create_method_pointer): allocate space for value types. + (mono_create_trampoline): also push the value type pointer for + methods returning value types. + (mono_create_method_pointer): support valuetype returns. 2002-07-31 Dietmar Maurer diff --git a/x86/tramp.c b/x86/tramp.c index f9d8658..8e07996 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -48,9 +48,14 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) if (sig->hasthis) { stack_size += sizeof (gpointer); - code_size += 5; + code_size += 10; } + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) { + stack_size += sizeof (gpointer); + code_size += 5; + } + for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { stack_size += sizeof (gpointer); @@ -229,6 +234,14 @@ enum_marshal: } } + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); + x86_push_membase (p, X86_ECX, 0); + } + } + /* * Insert call to function */ @@ -317,7 +330,6 @@ enum_marshal: #define MINV_POS (- sizeof (MonoInvocation)) #define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) -#define OBJ_POS 8 #define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) /* @@ -406,6 +418,10 @@ mono_create_method_pointer (MonoMethod *method) * Set the method pointer. */ x86_mov_membase_imm (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), (int)method, 4); + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) + arg_pos += 4; + /* * Handle this. 
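 * (For a non-enum valuetype return the caller passes a hidden pointer to
 * the return buffer as the first stack argument; arg_pos was bumped past
 * it above, so `this' is found either already in ECX or at
 * [ebp + arg_pos].)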
*/ @@ -414,7 +430,7 @@ mono_create_method_pointer (MonoMethod *method) /* * Grab it from the stack, otherwise it's already in ECX. */ - x86_mov_reg_membase (p, X86_ECX, X86_EBP, OBJ_POS, 4); + x86_mov_reg_membase (p, X86_ECX, X86_EBP, arg_pos, 4); arg_pos += 4; } x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), X86_ECX, 4); @@ -441,6 +457,7 @@ mono_create_method_pointer (MonoMethod *method) x86_call_reg (p, X86_ECX); x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); stackval_pos += sizeof (stackval); + /* fixme: alignment */ if (sig->pinvoke) arg_pos += mono_type_native_stack_size (sig->params [i], &align); else @@ -452,6 +469,13 @@ mono_create_method_pointer (MonoMethod *method) */ x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)), X86_EAX, 4); + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + x86_mov_reg_membase (p, X86_ECX, X86_EBP, 8, 4); + x86_mov_membase_reg (p, X86_EBP, stackval_pos, X86_ECX, 4); + } + } /* * Call the method. @@ -460,7 +484,7 @@ mono_create_method_pointer (MonoMethod *method) x86_push_reg (p, X86_EAX); x86_mov_reg_imm (p, X86_EDX, ves_exec_method); x86_call_reg (p, X86_EDX); - + /* * Move the return value to the proper place. */ @@ -497,7 +521,16 @@ mono_create_method_pointer (MonoMethod *method) simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } - /* do nothing ? */ + + x86_push_imm (p, sig->pinvoke); + x86_push_membase (p, X86_EBP, stackval_pos); + x86_push_reg (p, X86_EAX); + x86_push_imm (p, sig->ret); + x86_mov_reg_imm (p, X86_ECX, stackval_to_data); + x86_call_reg (p, X86_ECX); + //x86_breakpoint (p); + x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); + break; default: g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); -- cgit v1.1 From cc4396df6db395836340d26ad2f2d920f946729f Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Fri, 2 Aug 2002 07:13:54 +0000 Subject: 2002-08-02 Dietmar Maurer * marshal.c (mono_delegate_to_ftnptr): pass delegate->target instead of the delegate itself as this pointer (bug #28383) svn path=/trunk/mono/; revision=6348 --- x86/tramp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x86/tramp.c b/x86/tramp.c index 8e07996..4046f5d 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -3,7 +3,9 @@ * * Copyright (C) Ximian Inc. 
* - * Author: Paolo Molaro (lupus@ximian.com) + * Authors: + * Paolo Molaro (lupus@ximian.com) + * Dietmar Maurer (dietmar@ximian.com) * */ @@ -528,7 +530,6 @@ mono_create_method_pointer (MonoMethod *method) x86_push_imm (p, sig->ret); x86_mov_reg_imm (p, X86_ECX, stackval_to_data); x86_call_reg (p, X86_ECX); - //x86_breakpoint (p); x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); break; -- cgit v1.1 From 347f6a854167fa5a26484b83736de86f5ffd8ea0 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Fri, 2 Aug 2002 17:55:44 +0000 Subject: did quick surgery to update for Dietmar's new code svn path=/trunk/mono/; revision=6359 --- ppc/tramp.c | 166 ++++++++++-------------------------------------------------- 1 file changed, 26 insertions(+), 140 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 17a90b3..1227b51 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -21,7 +21,7 @@ #endif #endif -#define DEBUG(x) +#define DEBUG(x) x /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -92,18 +92,15 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } static void inline -calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint *strings, gint runtime) +calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, gboolean string_ctor) { - MonoMethodSignature *sig; guint i, fr, gr; guint32 simpletype; fr = gr = 0; *stack_size = MINIMAL_STACK_SIZE*4; *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS)*4; - *strings = 0; - sig = method->signature; if (sig->hasthis) { add_general (&gr, stack_size, code_size, TRUE); } @@ -129,12 +126,11 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint case MONO_TYPE_PTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: add_general (&gr, stack_size, code_size, TRUE); break; case MONO_TYPE_SZARRAY: add_general (&gr, stack_size, code_size, TRUE); - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) - break; *code_size += 4; break; case MONO_TYPE_VALUETYPE: @@ -148,15 +144,6 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint add_general (&gr, stack_size, code_size, TRUE); *code_size += 4; break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - add_general (&gr, stack_size, code_size, TRUE); - break; - } - (*strings) ++; - *code_size += 12*4; - *stack_size += 4; - break; case MONO_TYPE_I8: add_general (&gr, stack_size, code_size, FALSE); break; @@ -174,9 +161,7 @@ calculate_sizes (MonoMethod *method, guint *stack_size, guint *code_size, guint } } - if (sig->ret->byref || - (method->klass == mono_defaults.string_class && - *method->name == '.' 
&& !strcmp (method->name, ".ctor"))) { + if (sig->ret->byref || string_ctor) { *code_size += 8; } else { simpletype = sig->ret->type; @@ -198,13 +183,8 @@ enum_retvalue: case MONO_TYPE_R8: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - *code_size += 8; - break; case MONO_TYPE_STRING: *code_size += 8; - if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { - *code_size += 16; - } break; case MONO_TYPE_I8: *code_size += 12; @@ -223,15 +203,6 @@ enum_retvalue: } } - if (*strings) { - /* space to keep parameters and prepared strings */ - *stack_size += 8; - *code_size += 16; - if (sig->hasthis) { - *stack_size += 4; - *code_size += 12; - } - } /* align stack size to 16 */ DEBUG (printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size)); *stack_size = (*stack_size + 15) & ~15; @@ -239,7 +210,7 @@ enum_retvalue: } static inline guint8 * -emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) +emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) { /* function prolog */ ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ @@ -248,28 +219,15 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ - /* handle our parameters */ - if (strings) { - ppc_stw (p, ppc_r30, stack_size - 16, ppc_r1); - ppc_stw (p, ppc_r29, stack_size - 12, ppc_r1); - if (method->signature->hasthis) { - ppc_stw (p, ppc_r28, 24, ppc_r1); - } - ppc_mr (p, ppc_r30, ppc_r6); /* args */ - ppc_mr (p, ppc_r29, ppc_r3); /* callme */ - if (method->signature->hasthis) { - ppc_mr (p, ppc_r28, ppc_r5); /* this */ - } - } else { - ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ - ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ - } + ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ + ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ + ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ return p; } -#define ARG_BASE strings ? 
ppc_r30 : ppc_r12 +#define ARG_BASE ppc_r12 #define SAVE_4_IN_GENERIC_REGISTER \ if (gr < GENERAL_REGS) { \ ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ @@ -281,32 +239,15 @@ emit_prolog (guint8 *p, MonoMethod *method, guint stack_size, guint strings) } inline static guint8* -emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) +emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size) { - MonoMethodSignature *sig; guint i, fr, gr, act_strs, stack_par_pos; guint32 simpletype; fr = gr = 0; act_strs = 0; - sig = method->signature; stack_par_pos = 8; - if (strings) { - for (i = 0; i < sig->param_count; ++i) { - if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING - && !((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)) { - ppc_lis (p, ppc_r0, (guint32) mono_string_to_utf8 >> 16); - ppc_lwz (p, ppc_r3, i*16, ppc_r30); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_to_utf8 & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - ppc_stw (p, ppc_r3, stack_size - 24 - act_strs, ppc_r31); - act_strs += 4; - } - } - } - if (sig->hasthis) { ppc_mr (p, ppc_r3, ppc_r5); gr ++; @@ -334,13 +275,11 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str case MONO_TYPE_PTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: SAVE_4_IN_GENERIC_REGISTER; break; - case MONO_TYPE_SZARRAY: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - SAVE_4_IN_GENERIC_REGISTER; - } else { - g_warning ("untested marshaling\n"); + /* g_warning ("untested marshaling\n"); if (gr < GENERAL_REGS) { ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); ppc_lwz (p, ppc_r3 + gr, G_STRUCT_OFFSET (MonoArray, vector), ppc_r3 + gr); @@ -348,8 +287,7 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str } else { NOT_IMPLEMENTED ("save marshalled SZARRAY on stack"); } - } - break; + break; */ case MONO_TYPE_VALUETYPE: if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; @@ -366,18 +304,6 @@ emit_save_parameters (guint8 *p, MonoMethod *method, guint stack_size, guint str NOT_IMPLEMENTED ("save value type on stack"); } break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - SAVE_4_IN_GENERIC_REGISTER; - } else { - if (gr < 8) { - ppc_lwz (p, ppc_r3 + gr, stack_size - 24 - act_strs, ppc_r31); - gr ++; - act_strs += 4; - } else - NOT_IMPLEMENTED ("string on stack"); - } - break; case MONO_TYPE_I8: if (gr < 7) { if (gr & 1) @@ -439,19 +365,16 @@ mono_string_new_wrapper (const char *text) } */ static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gint runtime) +emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, guint stack_size, gboolean string_ctor) { - MonoMethodSignature *sig = method->signature; guint32 simpletype; /* call "callme" */ - ppc_mtlr (p, strings ? ppc_r29 : ppc_r0); + ppc_mtlr (p, ppc_r0); ppc_blrl (p); /* get return value */ - if (sig->ret->byref || - (method->klass == mono_defaults.string_class && - *method->name == '.' 
&& !strcmp (method->name, ".ctor"))) { + if (sig->ret->byref || string_ctor) { ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ } else { @@ -478,20 +401,9 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; case MONO_TYPE_STRING: - if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { - ppc_lis (p, ppc_r0, (guint32) mono_string_new_wrapper >> 16); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) mono_string_new_wrapper & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - } - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; case MONO_TYPE_R4: ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ @@ -524,34 +436,8 @@ enum_retvalue: } static inline guint8 * -emit_epilog (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gboolean runtime) +emit_epilog (guint8 *p, MonoMethodSignature *sig, guint stack_size) { - if (strings) { - MonoMethodSignature *sig = method->signature; - guint i, act_strs; - - /* free allocated memory */ - act_strs = 0; - for (i = 0; i < sig->param_count; ++i) { - if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_STRING - && !((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime)) { - ppc_lis (p, ppc_r0, (guint32) g_free >> 16); - ppc_lwz (p, ppc_r3, stack_size - 24 - act_strs, ppc_r31); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) g_free & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - act_strs += 4; - } - } - - /* restore volatile registers */ - ppc_lwz (p, ppc_r30, stack_size - 16, ppc_r1); - ppc_lwz (p, ppc_r29, stack_size - 12, ppc_r1); - if (method->signature->hasthis) { - ppc_lwz (p, ppc_r28, 24, ppc_r1); - } - } - /* function epilog */ ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ ppc_lwz (p, ppc_r0, 4, ppc_r11); /* r0 <--- r11[4] load return address */ @@ -564,19 +450,19 @@ emit_epilog (guint8 *p, MonoMethod *method, guint stack_size, guint strings, gbo } MonoPIFunc -mono_create_trampoline (MonoMethod *method, int runtime) +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { guint8 *p, *code_buffer; - guint stack_size, code_size, strings; + guint stack_size, code_size; - DEBUG (printf ("\nPInvoke [start emiting] %s\n", method->name)); - calculate_sizes (method, &stack_size, &code_size, &strings, runtime); + DEBUG (printf ("\nPInvoke [start emiting]\n")); + calculate_sizes (sig, &stack_size, &code_size, string_ctor); p = code_buffer = alloc_code_memory (code_size); - p = emit_prolog (p, method, stack_size, strings); - p = emit_save_parameters (p, method, stack_size, strings, runtime); - p = emit_call_and_store_retval (p, method, stack_size, strings, runtime); - p = emit_epilog (p, method, stack_size, strings, runtime); + p = emit_prolog (p, sig, stack_size); + p = emit_save_parameters (p, sig, stack_size); + p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); + p = emit_epilog (p, sig, stack_size); /* { guchar *cp; -- cgit v1.1 From f73afba7e99de872e4e9d9dcf3c7c483632f6bc6 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Fri, 2 Aug 2002 18:13:59 +0000 Subject: more surgery svn path=/trunk/mono/; revision=6360 --- 
ppc/tramp.c | 58 ++++++++++++++++------------------------------------------ 1 file changed, 16 insertions(+), 42 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 1227b51..8798452 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -21,7 +21,7 @@ #endif #endif -#define DEBUG(x) x +#define DEBUG(x) /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -509,7 +509,7 @@ mono_create_method_pointer (MonoMethod *method) MonoMethodSignature *sig; MonoJitInfo *ji; guint8 *p, *code_buffer; - guint i, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param; + guint i, align, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param; guint32 simpletype; code_size = 1024; @@ -581,50 +581,16 @@ mono_create_method_pointer (MonoMethod *method) } \ ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); \ ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); \ - stackval_arg_pos ++; \ + /* fixme: alignment */ \ + if (sig->pinvoke) \ + stackval_arg_pos += mono_type_native_stack_size (sig->params [i], &align); \ + else \ + stackval_arg_pos += mono_type_stack_size (sig->params [i], &align); \ ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); \ \ CALL_STACKVAL_FROM_DATA - if (sig->params [i]->byref) { - CALL_SIZE_4; - continue; - } - simpletype = sig->params [i]->type; - enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - CALL_SIZE_4; - break; - case MONO_TYPE_VALUETYPE: - NOT_IMPLEMENTED ("value type"); - break; - case MONO_TYPE_I8: - NOT_IMPLEMENTED ("i8"); - break; - case MONO_TYPE_R4: - NOT_IMPLEMENTED ("r4"); - break; - case MONO_TYPE_R8: - NOT_IMPLEMENTED ("r8"); - break; - default: - g_error ("Can't delegate 0x%x type", sig->params [i]->type); - } + CALL_SIZE_4; } /* return value storage */ @@ -645,6 +611,7 @@ mono_create_method_pointer (MonoMethod *method) DEBUG (printf ("ret by ref\n")); ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31); } else { + enum_retvalue: switch (sig->ret->type) { case MONO_TYPE_VOID: break; @@ -676,6 +643,13 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_R8: ppc_lfd (p, ppc_f1, stackval_arg_pos, ppc_r31); break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + NOT_IMPLEMENTED ("value type as ret val from delegate"); + break; default: g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); break; -- cgit v1.1 From e13f4a98c6fe61ec768b0da9d8832814a313ed78 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Fri, 2 Aug 2002 18:34:20 +0000 Subject: more WIP svn path=/trunk/mono/; revision=6363 --- ppc/tramp.c | 55 ++++++++++++++++++++++++++----------------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 8798452..e3fddb5 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -562,35 +562,32 @@ mono_create_method_pointer (MonoMethod *method) /* add stackval arguments */ for (i = 0; i < sig->param_count; ++i) { -#define CALL_STACKVAL_FROM_DATA \ - ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); \ - ppc_ori (p, ppc_r0, ppc_r0, (guint32) 
stackval_from_data & 0xffff); \ - ppc_mtlr (p, ppc_r0); \ - ppc_blrl (p) -#define CALL_SIZE_4 \ - if (reg_param < 3 - (sig->hasthis ? 1 : 0)) { \ - ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - (sig->hasthis ? 1 : 0))*4); \ - reg_param ++; \ - } else if (reg_param < 8) { \ - ppc_stw (p, ppc_r3 + reg_param, local_pos, ppc_r31); \ - ppc_addi (p, ppc_r5, ppc_r31, local_pos); \ - reg_param ++; \ - } else { \ - ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); \ - stack_param ++; \ - } \ - ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); \ - ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); \ - /* fixme: alignment */ \ - if (sig->pinvoke) \ - stackval_arg_pos += mono_type_native_stack_size (sig->params [i], &align); \ - else \ - stackval_arg_pos += mono_type_stack_size (sig->params [i], &align); \ - ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); \ -\ - CALL_STACKVAL_FROM_DATA - - CALL_SIZE_4; + if (reg_param < 3 - (sig->hasthis ? 1 : 0)) { + ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - (sig->hasthis ? 1 : 0))*4); + reg_param ++; + } else if (reg_param < 8) { + ppc_stw (p, ppc_r3 + reg_param, local_pos, ppc_r31); + ppc_addi (p, ppc_r5, ppc_r31, local_pos); + reg_param ++; + } else { + ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); + stack_param ++; + } + ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); + ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); + + ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); + ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); + ppc_li (p, ppc_r6, sig->pinvoke); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) stackval_from_data & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + + /* fixme: alignment */ + if (sig->pinvoke) + stackval_arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + stackval_arg_pos += mono_type_stack_size (sig->params [i], &align); } /* return value storage */ -- cgit v1.1 From 60179dd8c27bf3c080ca2c7db818c01a51c9d4b1 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Mon, 5 Aug 2002 09:53:43 +0000 Subject: 2002-08-05 Dietmar Maurer * x86/tramp.c (mono_create_trampoline): fixed stack_size bug svn path=/trunk/mono/; revision=6408 --- ChangeLog | 4 ++++ x86/tramp.c | 23 ++++++----------------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/ChangeLog b/ChangeLog index a884ca0..3a37835 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-08-05 Dietmar Maurer + + * x86/tramp.c (mono_create_trampoline): fixed stack_size bug + 2002-08-01 Dietmar Maurer * x86/tramp.c (mono_create_method_pointer): allocate space for diff --git a/x86/tramp.c b/x86/tramp.c index 4046f5d..4d5634c 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -35,7 +35,7 @@ MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { unsigned char *p, *code_buffer; - guint32 local_size = 0, stack_size = 0, code_size = 50; + guint32 stack_size = 0, code_size = 50; guint32 arg_pos, simpletype; int i, stringp; static GHashTable *cache = NULL; @@ -62,7 +62,6 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) if (sig->params [i]->byref) { stack_size += sizeof (gpointer); code_size += 20; - local_size++; continue; } simpletype = sig->params [i]->type; @@ -83,6 +82,7 @@ enum_calc_size: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: stack_size += 4; code_size += i < 10 ? 
5 : 8; break; @@ -102,11 +102,6 @@ enum_calc_size: } break; } - case MONO_TYPE_STRING: - stack_size += 4; - code_size += 20; - local_size++; - break; case MONO_TYPE_I8: stack_size += 8; code_size += i < 10 ? 5 : 8; @@ -131,15 +126,11 @@ enum_calc_size: x86_push_reg (p, X86_EBP); x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); /* - * We store some local vars here to handle string pointers. * and align to 16 byte boundary... */ - if (local_size) { - x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size * 4); - stack_size = (stack_size * local_size * 4) % 16; - } else { - stack_size = stack_size % 16; - } + stack_size += 15; + stack_size &= ~15; + if (stack_size) x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); @@ -173,6 +164,7 @@ enum_marshal: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: x86_push_membase (p, X86_EDX, arg_pos); break; case MONO_TYPE_R4: @@ -213,9 +205,6 @@ enum_marshal: goto enum_marshal; } break; - case MONO_TYPE_STRING: - x86_push_membase (p, X86_EDX, arg_pos); - break; case MONO_TYPE_I8: case MONO_TYPE_U8: case MONO_TYPE_R8: -- cgit v1.1 From dc11862f43a6240bcc35d2ef96fb04750c4bf930 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Mon, 5 Aug 2002 16:43:06 +0000 Subject: x86-codegen.h: fixed bug in x86_memindex_emit, for basereg == EBP && disp == imm32; svn path=/trunk/mono/; revision=6433 --- x86/x86-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index f69534b..cea7d89 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -266,7 +266,7 @@ typedef union { x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ x86_imm_emit8 ((inst), (disp)); \ } else { \ - x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), 2, (r), 4); \ x86_address_byte ((inst), (shift), (indexreg), 5); \ x86_imm_emit32 ((inst), (disp)); \ } \ -- cgit v1.1 From f8f8b65c484f48436941e4985cfb4b837cff4ceb Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 5 Aug 2002 17:28:10 +0000 Subject: Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro * x86/tramp.c: fix random memory read in mono_create_method_pointer. svn path=/trunk/mono/; revision=6436 --- ChangeLog | 5 +++++ x86/tramp.c | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 3a37835..4308f3e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix random memory read in mono_create_method_pointer. + 2002-08-05 Dietmar Maurer * x86/tramp.c (mono_create_trampoline): fixed stack_size bug diff --git a/x86/tramp.c b/x86/tramp.c index 4d5634c..b6dfeb2 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -342,7 +342,7 @@ mono_create_method_pointer (MonoMethod *method) gint32 local_size; gint32 stackval_pos, arg_pos = 8; int i, size, align, cpos; - int vtbuf [sig->param_count]; + int *vtbuf; /* * If it is a static P/Invoke method, we can just return the pointer @@ -361,6 +361,7 @@ mono_create_method_pointer (MonoMethod *method) sig = method->signature; code_buffer = p = alloca (512); /* FIXME: check for overflows... */ + vtbuf = alloca (sizeof(int)*sig->param_count); local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1); -- cgit v1.1 From fafa1892b8b0315cab29de09f09f2aa5041b61a7 Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Tue, 20 Aug 2002 15:03:07 +0000 Subject: This nearly completes SPARC trampoline support for mint/mono. The delegate code still needs some work. There are bugs. 
Send crash reports, as well as .cs code and exe's to crichton@gimp.org Also, if anyone gets Bus Errors in the code, let me know as well, I've been hunting down alignment bugs as well. svn path=/trunk/mono/; revision=6812 --- ChangeLog | 4 + sparc/tramp.c | 284 +++++++++++++++++++++++++++------------------------------- 2 files changed, 136 insertions(+), 152 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4308f3e..67216cd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-08-20 Mark Crichton + + * sparc/tramp.c (mono_create_trampoline): Now works on Sparc. Tested + on an Ultra 2 running Linux. Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro diff --git a/sparc/tramp.c b/sparc/tramp.c index f628fa0..f7e9b69 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -3,10 +3,11 @@ * Create trampolines to invoke arbitrary functions. * * Copyright (C) Ximian Inc. - * + * * Authors: Paolo Molaro (lupus@ximian.com) * Jeffrey Stedfast - * + * Mark Crichton + * */ #include "config.h" @@ -23,23 +24,17 @@ #define THIS_POS sparc_i2 #define ARGP_POS sparc_i3 #define LOC_POS -4 +#define MINV_POS 8 #define ARG_SIZE sizeof (stackval) -static void -fake_func (void (*callme)(gpointer, gpointer), stackval *retval, void *this_obj, stackval *arguments) -{ - /* - *(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p); - *(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f); - */ - - /* internal_from_handle() */ - /* return (gpointer)(*callme) (((MonoType *)arguments [0].data.p)->data.klass); */ - - /* InitializeArray() */ - (*callme) (arguments [0].data.p, arguments [1].data.p); -} +/* Some assembly... */ +#define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") + + +/* WARNING: This code WILL BREAK. We do not currently check the status + * of the registers. Things can get trampled. You have been warned. + */ static const char * mono_type (int type) @@ -115,9 +110,8 @@ mono_type (int type) } static void -calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, guint32 *code_size, int runtime) +calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_size, guint32 *code_size, gboolean string_ctor) { - MonoMethodSignature *sig = method->signature; guint32 local = 0, stack = 0, code = 6; guint32 simpletype; int i; @@ -147,6 +141,7 @@ calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, g case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: + case MONO_TYPE_STRING: case MONO_TYPE_R4: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: @@ -165,17 +160,6 @@ calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, g stack += 4; code += i < 6 ? 1 : 3; break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - stack += 4; - code += i < 6 ? 1 : 3; - break; - } - - stack += 4; - code += 5; - local++; - break; case MONO_TYPE_I8: stack += 8; code += i < 6 ? 
2 : 3; @@ -208,20 +192,14 @@ calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, g case MONO_TYPE_U: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_PTR: + case MONO_TYPE_STRING: case MONO_TYPE_R4: case MONO_TYPE_R8: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: code += 2; break; -#if 0 - case MONO_TYPE_STRING: - code += 2; - if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && !runtime) { - code += 4; - } - break; -#endif case MONO_TYPE_I8: code += 3; break; @@ -248,7 +226,10 @@ calculate_sizes (MonoMethod *method, guint32 *local_size, guint32 *stack_size, g stack += MINFRAME + (local * 4); - fprintf (stderr, "\tstack size: %d (%d)\n\tcode size: %d\n", STACKALIGN(stack), stack, code); +#ifdef DEBUG_SPARC_TRAMP + fprintf (stderr, "\tstack size: %d (%d)\n\tcode size: %d\n", + STACKALIGN(stack), stack, code); +#endif *local_size = local; *stack_size = STACKALIGN(stack); @@ -262,18 +243,23 @@ mono_string_new_wrapper (const char *text) } MonoPIFunc -mono_create_trampoline (MonoMethod *method, int runtime) +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { - MonoMethodSignature *sig; guint32 *p, *code_buffer; guint32 local_size, stack_size, code_size; guint32 arg_pos, simpletype; + static GHashTable *cache = NULL; int i, stringp, cur_out_reg; - - sig = method->signature; - - fprintf (stderr, "\nPInvoke [start emiting] %s\n", method->name); - calculate_sizes (method, &local_size, &stack_size, &code_size, runtime); + MonoPIFunc res; + + if (!cache) + cache = g_hash_table_new ((GHashFunc)mono_signature_hash, + (GCompareFunc)mono_metadata_signature_equal); + + if ((res = (MonoPIFunc)g_hash_table_lookup(cache, sig))) + return res; + + calculate_sizes (sig, &local_size, &stack_size, &code_size, string_ctor); code_buffer = p = alloca (code_size * 4); cur_out_reg = sparc_o0; @@ -288,25 +274,6 @@ mono_create_trampoline (MonoMethod *method, int runtime) sparc_st_imm (p, sparc_i3, sparc_fp, 80); #endif - /* - * We store some local vars here to handle string pointers. - * and align to 16 byte boundary... - */ -#if 0 - if (local_size) { - x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size * 4); - stack_size = (stack_size * local_size * 4) % 16; - } else { - stack_size = stack_size % 16; - } - if (stack_size) - x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size); -#endif - - /* - * %i3 has the pointer to the args. 
- */ - if (sig->hasthis) { sparc_mov_reg_reg (p, sparc_i2, cur_out_reg); cur_out_reg++; @@ -318,7 +285,13 @@ mono_create_trampoline (MonoMethod *method, int runtime) arg_pos = ARG_SIZE * i; if (sig->params[i]->byref) { - fprintf (stderr, "\tpushing params[%d] (byref): type=%s;\n", i, mono_type (sig->params[i]->type)); + +#ifdef DEBUG_SPARC_TRAMP + fprintf (stderr, "\tpushing params[%d] (byref):"\ + " type=%s;\n", i + ,mono_type(sig->params[i]->type)); +#endif + sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); cur_out_reg++; continue; @@ -326,7 +299,12 @@ mono_create_trampoline (MonoMethod *method, int runtime) simpletype = sig->params[i]->type; enum_marshal: - fprintf (stderr, "\tpushing params[%d]: type=%s;\n", i, mono_type (simpletype)); + +#ifdef DEBUG_SPARC_TRAMP + fprintf (stderr, "\tpushing params[%d]: type=%s;\n", + i, mono_type (simpletype)); +#endif + switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: @@ -339,6 +317,7 @@ enum_marshal: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: + case MONO_TYPE_STRING: case MONO_TYPE_R4: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: @@ -358,36 +337,6 @@ enum_marshal: cur_out_reg++; } break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - cur_out_reg++; - break; - } - -#if 0 - sparc_sethi (p, mono_string_to_utf8, sparc_l0); - sparc_or_imm (p, 0, sparc_l0, mono_string_to_utf8, sparc_l1); - - x86_push_membase (p, X86_EDX, arg_pos); - x86_mov_reg_imm (p, X86_EDX, mono_string_to_utf8); - x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - x86_push_reg (p, X86_EAX); - /* - * Store the pointer in a local we'll free later. - */ - stringp++; - x86_mov_membase_reg (p, X86_EBP, LOC_POS * stringp, X86_EAX, 4); - /* - * we didn't save the reg: restore it here. - */ - if (i > 1) - x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); -#endif - fprintf (stderr, "MONO_TYPE_STRING not yet fully supported.\n"); - exit (1); - break; case MONO_TYPE_I8: sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); cur_out_reg++; @@ -415,25 +364,26 @@ enum_marshal: * Long integers are in EAX:EDX. * FP values are on the FP stack. 
*/ -#if 0 - if (sig->ret->byref) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + if (sig->ret->byref || string_ctor) { + sparc_st (p, sparc_o0, sparc_i1, 0); } else { simpletype = sig->ret->type; + +#ifdef DEBUG_SPARC_TRAMP + fprintf (stderr, "\tret type: %s;\n", mono_type (simpletype)); +#endif + enum_retvalue: switch (simpletype) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 1); + sparc_stb (p, sparc_o0, sparc_i1, 0); break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 2); + sparc_sth (p, sparc_o0, sparc_i1, 0); break; case MONO_TYPE_I4: case MONO_TYPE_U4: @@ -443,39 +393,18 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - break; - } - - /* If the argument is non-null, then convert the value back */ - x86_alu_reg_reg (p, X86_OR, X86_EAX, X86_EAX); - x86_branch8 (p, X86_CC_EQ, 11, FALSE); - x86_push_reg (p, X86_EAX); - x86_mov_reg_imm (p, X86_EDX, mono_string_new); - x86_call_reg (p, X86_EDX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 4); - - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); + case MONO_TYPE_STRING: + case MONO_TYPE_PTR: + sparc_st (p, sparc_o0, sparc_i1, 0); break; case MONO_TYPE_R4: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_fst_membase (p, X86_ECX, 0, FALSE, TRUE); + sparc_stf (p, sparc_f0, sparc_i1, 0); break; case MONO_TYPE_R8: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_fst_membase (p, X86_ECX, 0, TRUE, TRUE); + sparc_stdf (p, sparc_f0, sparc_i1, 0); break; case MONO_TYPE_I8: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - x86_mov_membase_reg (p, X86_ECX, 4, X86_EDX, 4); + sparc_std (p, sparc_o0, sparc_i1, 0); break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { @@ -488,54 +417,105 @@ enum_retvalue: g_error ("Can't handle as return value 0x%x", sig->ret->type); } } -#endif -#if 0 - /* free the allocated strings... */ - if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)) { - if (local_size) - x86_mov_reg_imm (p, X86_EDX, g_free); - for (i = 1; i <= local_size; ++i) { - x86_push_membase (p, X86_EBP, LOC_POS * i); - x86_call_reg (p, X86_EDX); - } - } -#endif /* * Standard epilog. * 8 may be 12 when returning structures (to skip unimp opcode). 
*/ sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); - + +#if DEBUG_SPARC_TRAMP { unsigned char *inptr, *inend; inptr = (unsigned char *) code_buffer; inend = (unsigned char *) p; - printf (".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); + fprintf (stderr,".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); while (inptr < inend) { - printf (".byte 0x%x\n", *inptr); + fprintf (stderr, ".byte 0x%x\n", *inptr); inptr++; } - fflush (stdout); + fflush (stderr); } - - fprintf (stderr, "PInvoke [finish emiting] %s\n", method->name); - - /* FIXME: need to flush */ - return g_memdup (code_buffer, 4 * (p - code_buffer)); +#endif + + res = (MonoPIFunc)g_memdup (code_buffer, 4 * (p - code_buffer)); + + /* So here's the deal... + * UltraSPARC will flush a whole cache line at a time + * BUT, older SPARCs won't. + * So, be compatable and flush dwords at a time... + */ + + for (i = 0; i < ((p - code_buffer)/2); i++) + flushi((res + (i*8))); + + g_hash_table_insert(cache, sig, res); + + return res; } void * mono_create_method_pointer (MonoMethod *method) { + MonoMethodSignature *sig; + MonoJitInfo *ji; + guint32 stack_size; + unsigned char *p, *code_buffer; + + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + + sig = method->signature; + + code_buffer = p = alloca (1024); /* Ok, this might overflow. */ + + stack_size = STACKALIGN(((sig->param_count + 1) * 4) + MINFRAME); + + /* Prologue */ + /* SPARC rocks, 'nuff said */ + sparc_save_imm(p, sparc_sp, -stack_size, sparc_sp); + + /* Initialize the structure with zeros. GO GO GADGET G0! */ + sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), 0); + sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), 0); + sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), 0); + sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), 0); + + /* set the method pointer */ + /* 32 bit runtime -- Any thoughts on doing sparc64? 
*/ + sparc_ld_imm(p, (guint32) method >> 16, 0, sparc_o0); + sparc_or_imm(p, 0, sparc_o0, (guint32) method & 0xffff, sparc_o0); + sparc_st(p, sparc_o0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)),0); + { + unsigned char *inptr, *inend; + + inptr = (unsigned char *) code_buffer; + inend = (unsigned char *) p; + + fprintf (stderr,".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); + while (inptr < inend) { + fprintf (stderr, ".byte 0x%x\n", *inptr); + inptr++; + } + fflush (stderr); + } + return NULL; } MonoMethod* mono_method_pointer_get (void *code) { + g_warning("mono_method_pointer_get: IMPLEMENT ME\n"); return NULL; } -- cgit v1.1 From b94511c33193dc728e039fa776bf3b9d5dad4e5b Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Wed, 21 Aug 2002 17:47:34 +0000 Subject: fixed delegates svn path=/trunk/mono/; revision=6862 --- ppc/tramp.c | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index e3fddb5..6fc89a1 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -509,7 +509,8 @@ mono_create_method_pointer (MonoMethod *method) MonoMethodSignature *sig; MonoJitInfo *ji; guint8 *p, *code_buffer; - guint i, align, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param; + guint i, align = 0, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param, + this_flag; guint32 simpletype; code_size = 1024; @@ -547,13 +548,22 @@ mono_create_method_pointer (MonoMethod *method) if (sig->hasthis) { ppc_stw (p, ppc_r3, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), ppc_r31); reg_param = 1; - } else { + } else if (sig->param_count) { + DEBUG (printf ("save r%d\n", 3)); ppc_stw (p, ppc_r3, local_pos, ppc_r31); local_pos += 4; reg_param = 0; } - ppc_stw (p, ppc_r4, local_pos, ppc_r31); local_pos += 4; - ppc_stw (p, ppc_r5, local_pos, ppc_r31); local_pos += 4; + + this_flag = (sig->hasthis ? 1 : 0); + if (sig->param_count) { + gint save_count = MAX (3, MIN (8, sig->param_count - 1)); + for (i = reg_param; i < save_count; i ++) { + ppc_stw (p, ppc_r4 + i, local_pos, ppc_r31); + local_pos += 4; + DEBUG (printf ("save r%d\n", 4 + i)); + } + } /* set MonoInvocation::stack_args */ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); @@ -562,12 +572,8 @@ mono_create_method_pointer (MonoMethod *method) /* add stackval arguments */ for (i = 0; i < sig->param_count; ++i) { - if (reg_param < 3 - (sig->hasthis ? 1 : 0)) { - ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - (sig->hasthis ? 
1 : 0))*4); - reg_param ++; - } else if (reg_param < 8) { - ppc_stw (p, ppc_r3 + reg_param, local_pos, ppc_r31); - ppc_addi (p, ppc_r5, ppc_r31, local_pos); + if (reg_param < 8) { + ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - this_flag)*4); reg_param ++; } else { ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); @@ -584,10 +590,12 @@ mono_create_method_pointer (MonoMethod *method) ppc_blrl (p); /* fixme: alignment */ + DEBUG (printf ("arg_pos %d --> ", stackval_arg_pos)); if (sig->pinvoke) - stackval_arg_pos += mono_type_native_stack_size (sig->params [i], &align); + stackval_arg_pos += 4*mono_type_native_stack_size (sig->params [i], &align); else - stackval_arg_pos += mono_type_stack_size (sig->params [i], &align); + stackval_arg_pos += 4*mono_type_stack_size (sig->params [i], &align); + DEBUG (printf ("%d\n", stackval_arg_pos)); } /* return value storage */ -- cgit v1.1 From 82d4a3ff22ea8e8dfb9a3ec2be10657e7e25cd97 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Sat, 24 Aug 2002 23:54:12 +0000 Subject: fixed struct marshaling, 108 tests pass now svn path=/trunk/mono/; revision=7013 --- ppc/tramp.c | 170 ++++++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 131 insertions(+), 39 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 6fc89a1..efee35d 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -7,6 +7,7 @@ #include "config.h" #include +#include #include "ppc-codegen.h" #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" @@ -21,7 +22,7 @@ #endif #endif -#define DEBUG(x) +#define DEBUG(x) x /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -92,7 +93,7 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } static void inline -calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, gboolean string_ctor) +calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, gboolean string_ctor, gboolean *use_memcpy) { guint i, fr, gr; guint32 simpletype; @@ -133,17 +134,31 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, add_general (&gr, stack_size, code_size, TRUE); *code_size += 4; break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) - g_error ("can only marshal enums, not generic structures (size: %d)", - mono_class_value_size (sig->params [i]->data.klass, NULL)); - add_general (&gr, stack_size, code_size, TRUE); - *code_size += 4; + size = mono_class_value_size (sig->params [i]->data.klass, NULL); + if (size != 4) { + DEBUG(printf ("copy %d bytes struct on stack\n", + mono_class_value_size (sig->params [i]->data.klass, NULL))); + *use_memcpy = TRUE; + *code_size += 8*4; + *stack_size += (size + 3) & (~3); + if (gr > GENERAL_REGS) { + *code_size += 4; + *stack_size += 4; + } + } else { + DEBUG(printf ("load %d bytes struct\n", + mono_class_value_size (sig->params [i]->data.klass, NULL))); + add_general (&gr, stack_size, code_size, TRUE); + *code_size += 4; + } break; + } case MONO_TYPE_I8: add_general (&gr, stack_size, code_size, FALSE); break; @@ -203,10 +218,18 @@ enum_retvalue: } } + if (*use_memcpy) { + *stack_size += 2*4; /* for r14, r15 */ + *code_size += 6*4; + if (sig->hasthis) { + *stack_size += 4; /* for r16 */ + *code_size += 4; + } 
+ } + /* align stack size to 16 */ DEBUG (printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size)); *stack_size = (*stack_size + 15) & ~15; - } static inline guint8 * @@ -219,11 +242,6 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ - ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ - ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ - - ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ - return p; } @@ -237,23 +255,82 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ stack_par_pos += 4; \ } +#define SAVE_4_VAL_IN_GENERIC_REGISTER \ + if (gr < GENERAL_REGS) { \ + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ + ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); \ + gr ++; \ + } else { \ + ppc_lwz (p, ppc_r11, i*16, ARG_BASE); \ + ppc_lwz (p, ppc_r11, 0, ppc_r11); \ + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ + stack_par_pos += 4; \ + } inline static guint8* -emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size) +emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gboolean use_memcpy) { - guint i, fr, gr, act_strs, stack_par_pos; + guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos; guint32 simpletype; fr = gr = 0; - act_strs = 0; stack_par_pos = 8; + ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ + + if (use_memcpy) { + ppc_stw (p, ppc_r14, stack_size - 16, ppc_r31); /* save r14 */ + ppc_stw (p, ppc_r15, stack_size - 20, ppc_r31); /* save r15 */ + ppc_mr (p, ppc_r14, ppc_r3); /* keep "callme" in register */ + ppc_mr (p, ppc_r15, ppc_r6); /* keep "arguments" in register */ + } else { + ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ + ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ + } + if (sig->hasthis) { - ppc_mr (p, ppc_r3, ppc_r5); + if (use_memcpy) { + ppc_stw (p, ppc_r16, stack_size - 24, ppc_r31); /* save r16 */ + ppc_mr (p, ppc_r16, ppc_r5); + } else + ppc_mr (p, ppc_r3, ppc_r5); gr ++; } - act_strs = 0; + if (use_memcpy) { + cur_struct_pos = struct_pos = stack_par_pos; + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) + continue; + if (sig->params [i]->type == MONO_TYPE_VALUETYPE && !sig->params [i]->data.klass->enumtype) { + gint size; + + size = mono_class_value_size (sig->params [i]->data.klass, NULL); + if (size != 4) { + /* call memcpy */ + ppc_addi (p, ppc_r3, ppc_r1, stack_par_pos); + ppc_lwz (p, ppc_r4, i*16, ppc_r15); + /* FIXME check if size > 0xffff */ + ppc_li (p, ppc_r5, size & 0xffff); + ppc_lis (p, ppc_r0, (guint32) memcpy >> 16); + ppc_ori (p, ppc_r0, ppc_r0, (guint32) memcpy & 0xffff); + ppc_mtlr (p, ppc_r0); + ppc_blrl (p); + stack_par_pos += (size + 3) & (~3); + } + } + } + + if (sig->hasthis) { + ppc_mr (p, ppc_r3, ppc_r16); + ppc_lwz (p, ppc_r16, stack_size - 24, ppc_r31); /* restore r16 */ + } + ppc_mr (p, ppc_r0, ppc_r14); + ppc_mr (p, ppc_r12, ppc_r15); + ppc_lwz (p, ppc_r14, stack_size - 16, ppc_r31); /* restore r14 */ + ppc_lwz (p, ppc_r15, stack_size - 20, ppc_r31); /* restore r15 */ + } + for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { SAVE_4_IN_GENERIC_REGISTER; @@ -279,31 +356,28 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size) case 
MONO_TYPE_SZARRAY: SAVE_4_IN_GENERIC_REGISTER; break; - /* g_warning ("untested marshaling\n"); - if (gr < GENERAL_REGS) { - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); - ppc_lwz (p, ppc_r3 + gr, G_STRUCT_OFFSET (MonoArray, vector), ppc_r3 + gr); - gr ++; - } else { - NOT_IMPLEMENTED ("save marshalled SZARRAY on stack"); - } - break; */ - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; if (sig->params [i]->data.klass->enumtype) { simpletype = sig->params [i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if (mono_class_value_size (sig->params [i]->data.klass, NULL) != 4) - g_error ("can only marshal enums, not generic structures (size: %d)", - mono_class_value_size (sig->params [i]->data.klass, NULL)); - if (gr < GENERAL_REGS) { - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); - ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); - gr ++; + size = mono_class_value_size (sig->params [i]->data.klass, NULL); + if (size == 4) { + SAVE_4_VAL_IN_GENERIC_REGISTER; } else { - NOT_IMPLEMENTED ("save value type on stack"); + if (gr < GENERAL_REGS) { + ppc_addi (p, ppc_r3 + gr, ppc_r1, cur_struct_pos); + gr ++; + } else { + ppc_lwz (p, ppc_r11, cur_struct_pos, ppc_r1); + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); + stack_par_pos += 4; + } + cur_struct_pos += (size + 3) & (~3); } break; + } case MONO_TYPE_I8: if (gr < 7) { if (gr & 1) @@ -454,13 +528,14 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { guint8 *p, *code_buffer; guint stack_size, code_size; + gboolean use_memcpy = FALSE; DEBUG (printf ("\nPInvoke [start emiting]\n")); - calculate_sizes (sig, &stack_size, &code_size, string_ctor); + calculate_sizes (sig, &stack_size, &code_size, string_ctor, &use_memcpy); p = code_buffer = alloc_code_memory (code_size); p = emit_prolog (p, sig, stack_size); - p = emit_save_parameters (p, sig, stack_size); + p = emit_save_parameters (p, sig, stack_size, use_memcpy); p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); p = emit_epilog (p, sig, stack_size); @@ -513,6 +588,20 @@ mono_create_method_pointer (MonoMethod *method) this_flag; guint32 simpletype; + /* + * If it is a static P/Invoke method, we can just return the pointer + * to the method implementation. 
+ */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + code_size = 1024; stack_size = 1024; stack_param = 0; @@ -572,6 +661,9 @@ mono_create_method_pointer (MonoMethod *method) /* add stackval arguments */ for (i = 0; i < sig->param_count; ++i) { + /* if (vtbuf [i] >= 0) { + NOT_IMPLEMENTED ("vtbuf"); + } */ if (reg_param < 8) { ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - this_flag)*4); reg_param ++; -- cgit v1.1 From 63315827a2ebc424954f4b8baf40497a5600ce7a Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Wed, 28 Aug 2002 14:41:08 +0000 Subject: fixed valuetypes marshaling in delegates svn path=/trunk/mono/; revision=7126 --- ppc/tramp.c | 45 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index efee35d..786cee3 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -22,7 +22,7 @@ #endif #endif -#define DEBUG(x) x +#define DEBUG(x) /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -585,7 +585,8 @@ mono_create_method_pointer (MonoMethod *method) MonoJitInfo *ji; guint8 *p, *code_buffer; guint i, align = 0, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param, - this_flag; + this_flag, cpos, vt_cur; + gint *vtbuf; guint32 simpletype; /* @@ -646,7 +647,7 @@ mono_create_method_pointer (MonoMethod *method) this_flag = (sig->hasthis ? 1 : 0); if (sig->param_count) { - gint save_count = MAX (3, MIN (8, sig->param_count - 1)); + gint save_count = MIN (8, sig->param_count - 1); for (i = reg_param; i < save_count; i ++) { ppc_stw (p, ppc_r4 + i, local_pos, ppc_r31); local_pos += 4; @@ -654,6 +655,31 @@ mono_create_method_pointer (MonoMethod *method) } } + /* prepare space for valuetypes */ + vt_cur = local_pos; + vtbuf = alloca (sizeof(int)*sig->param_count); + cpos = 0; + for (i = 0; i < sig->param_count; i++) { + MonoType *type = sig->params [i]; + vtbuf [i] = -1; + if (type->type == MONO_TYPE_VALUETYPE) { + MonoClass *klass = type->data.klass; + gint size; + + if (klass->enumtype) + continue; + size = mono_class_native_size (klass, &align); + cpos += align - 1; + cpos &= ~(align - 1); + vtbuf [i] = cpos; + cpos += size; + } + } + cpos += 3; + cpos &= ~3; + + local_pos += cpos; + /* set MonoInvocation::stack_args */ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); ppc_addi (p, ppc_r0, ppc_r31, stackval_arg_pos); @@ -661,9 +687,6 @@ mono_create_method_pointer (MonoMethod *method) /* add stackval arguments */ for (i = 0; i < sig->param_count; ++i) { - /* if (vtbuf [i] >= 0) { - NOT_IMPLEMENTED ("vtbuf"); - } */ if (reg_param < 8) { ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - this_flag)*4); reg_param ++; @@ -672,8 +695,16 @@ mono_create_method_pointer (MonoMethod *method) stack_param ++; } ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); - ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); + if (vtbuf [i] >= 0) { + ppc_addi (p, ppc_r4, ppc_r31, vt_cur); + ppc_stw (p, ppc_r4, stackval_arg_pos, ppc_r31); + ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); + ppc_lwz (p, ppc_r5, 0, ppc_r5); + vt_cur += vtbuf [i]; + } else { + ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); + } ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 
16); ppc_li (p, ppc_r6, sig->pinvoke); -- cgit v1.1 From 13eb9f4ebf45ffe17d555458cec8bbecefc71849 Mon Sep 17 00:00:00 2001 From: Radek Doulik Date: Wed, 28 Aug 2002 15:26:29 +0000 Subject: retval value type fixed svn path=/trunk/mono/; revision=7127 --- ppc/tramp.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index 786cee3..d40660e 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -22,7 +22,7 @@ #endif #endif -#define DEBUG(x) +#define DEBUG(x) x /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -209,7 +209,7 @@ enum_retvalue: simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } - NOT_IMPLEMENTED ("valuetype"); + *code_size += 2*4; break; case MONO_TYPE_VOID: break; @@ -331,6 +331,22 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gbo ppc_lwz (p, ppc_r15, stack_size - 20, ppc_r31); /* restore r15 */ } + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + gint size = mono_class_native_size (klass, NULL); + + DEBUG(printf ("retval value type size: %d\n", size)); + if (size > 8) { + ppc_lwz (p, ppc_r3, stack_size - 12, ppc_r31); + ppc_lwz (p, ppc_r3, 0, ppc_r3); + gr ++; + } else { + NOT_IMPLEMENTED ("retval valuetype <= 8 bytes"); + } + } + } + for (i = 0; i < sig->param_count; ++i) { if (sig->params [i]->byref) { SAVE_4_IN_GENERIC_REGISTER; @@ -497,7 +513,6 @@ enum_retvalue: simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } - NOT_IMPLEMENTED ("retval valuetype"); break; case MONO_TYPE_VOID: break; -- cgit v1.1 From fe7d0f819c55d76f0cb7a54ba66d4368d40385bd Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Thu, 19 Sep 2002 18:30:56 +0000 Subject: Beginning to add support for Solaris. Tested on Solaris 9. Shared handles are still not working, will be addressed soon. Trampoline code still broken, expect a rewrite. svn path=/trunk/mono/; revision=7622 --- sparc/tramp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sparc/tramp.c b/sparc/tramp.c index f7e9b69..7ab6698 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -510,7 +510,7 @@ mono_create_method_pointer (MonoMethod *method) fflush (stderr); } - return NULL; + return 0xdeadbeef; } MonoMethod* -- cgit v1.1 From 0110bf4a5a435c5d60583887e0e0f28b7993a4cf Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Mon, 23 Sep 2002 02:25:43 +0000 Subject: Starting rewrite of trampolining for SPARC. It needed some cleanup. It doesn't work at all now. GO PROGRESS! svn path=/trunk/mono/; revision=7728 --- ChangeLog | 6 + sparc/tramp.c | 670 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 343 insertions(+), 333 deletions(-) diff --git a/ChangeLog b/ChangeLog index 67216cd..fe0176e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2002-09-22 Mark Crichton + + * sparc/tramp.c: Completely broke trampolining on SPARC processors. + The code needed a nasty cleanup, so most of it is rewritten. + It will be fixed. + 2002-08-20 Mark Crichton * sparc/tramp.c (mono_create_trampoline): Now works on Sparc. 
Tested diff --git a/sparc/tramp.c b/sparc/tramp.c index 7ab6698..4d08fbc 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -12,6 +12,7 @@ #include "config.h" #include +#include #include "sparc-codegen.h" #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" @@ -19,114 +20,62 @@ #include "mono/metadata/appdomain.h" -#define FUNC_ADDR_POS sparc_i0 -#define RETVAL_POS sparc_i1 -#define THIS_POS sparc_i2 -#define ARGP_POS sparc_i3 -#define LOC_POS -4 -#define MINV_POS 8 - #define ARG_SIZE sizeof (stackval) +#define PROLOG_INS 1 +#define CALL_INS 3 /* Max 3. 2 for the load and 1 for the call */ +#define EPILOG_INS 2 +#define MINIMAL_STACK_SIZE 23 +#define FLOAT_REGS 32 +#define OUT_REGS 6 +#define LOCAL_REGS 8 /* Some assembly... */ #define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") - -/* WARNING: This code WILL BREAK. We do not currently check the status - * of the registers. Things can get trampled. You have been warned. - */ - -static const char * -mono_type (int type) +static void +add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) { - switch (type) { - case MONO_TYPE_END: - return "MONO_TYPE_END"; - case MONO_TYPE_VOID: - return "MONO_TYPE_VOID"; - case MONO_TYPE_BOOLEAN: - return "MONO_TYPE_BOOLEAN"; - case MONO_TYPE_CHAR: - return "MONO_TYPE_CHAR"; - case MONO_TYPE_I1: - return "MONO_TYPE_I1"; - case MONO_TYPE_U1: - return "MONO_TYPE_U1"; - case MONO_TYPE_I2: - return "MONO_TYPE_I2"; - case MONO_TYPE_U2: - return "MONO_TYPE_U2"; - case MONO_TYPE_I4: - return "MONO_TYPE_I4"; - case MONO_TYPE_U4: - return "MONO_TYPE_U4"; - case MONO_TYPE_I8: - return "MONO_TYPE_I8"; - case MONO_TYPE_U8: - return "MONO_TYPE_U8"; - case MONO_TYPE_R4: - return "MONO_TYPE_R4"; - case MONO_TYPE_R8: - return "MONO_TYPE_R8"; - case MONO_TYPE_STRING: - return "MONO_TYPE_STRING"; - case MONO_TYPE_PTR: - return "MONO_TYPE_PTR"; - case MONO_TYPE_BYREF: - return "MONO_TYPE_BYREF"; - case MONO_TYPE_VALUETYPE: - return "MONO_TYPE_VALUETYPE"; - case MONO_TYPE_CLASS: - return "MONO_TYPE_CLASS"; - case MONO_TYPE_ARRAY: - return "MONO_TYPE_ARRAY"; - case MONO_TYPE_TYPEDBYREF: - return "MONO_TYPE_TYPEBYREF"; - case MONO_TYPE_I: - return "MONO_TYPE_I"; - case MONO_TYPE_U: - return "MONO_TYPE_U"; - case MONO_TYPE_FNPTR: - return "MONO_TYPE_FNPTR"; - case MONO_TYPE_OBJECT: - return "MONO_TYPE_OBJECT"; - case MONO_TYPE_SZARRAY: - return "MONO_TYPE_SZARRAY"; - case MONO_TYPE_CMOD_REQD: - return "MONO_TYPE_CMOD_REQD"; - case MONO_TYPE_CMOD_OPT: - return "MONO_TYPE_CMOD_OPT"; - case MONO_TYPE_INTERNAL: - return "MONO_TYPE_INTERNAL"; - case MONO_TYPE_MODIFIER: - return "MONO_TYPE_MODIFIER"; - case MONO_TYPE_SENTINEL: - return "MONO_TYPE_SENTINEL"; - case MONO_TYPE_PINNED: - return "MONO_TYPE_PINNED"; + if (simple) { + if (*gr >= OUT_REGS) { + *stack_size += 4; + *code_size += 8; + } else { + *code_size += 4; + } + } else { + if (*gr >= OUT_REGS - 1) { + *stack_size += 8 + (*stack_size % 8); + *code_size += 16; + } else { + *code_size += 16; + } + if ((*gr) && 1) + (*gr)++; + (*gr)++; } - - return "??"; + (*gr)++; } static void -calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_size, guint32 *code_size, gboolean string_ctor) +calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, + gboolean string_ctor, gboolean *use_memcpy) { - guint32 local = 0, stack = 0, code = 6; + guint i, fr, gr; guint32 simpletype; - int i; + + fr = gr = 0; + *stack_size = MINIMAL_STACK_SIZE * 4; + *code_size = (PROLOG_INS + CALL_INS + 
EPILOG_INS) * 4; /* function arguments */ if (sig->hasthis) - code++; + add_general (&gr, stack_size, code_size, TRUE); for (i = 0; i < sig->param_count; i++) { if (sig->params[i]->byref) { - stack += sizeof (gpointer); - code += i < 6 ? 1 : 3; + add_general (&gr, stack_size, code_size, TRUE); continue; } - simpletype = sig->params[i]->type; enum_calc_size: switch (simpletype) { @@ -141,32 +90,39 @@ calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_s case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_STRING: - case MONO_TYPE_R4: - case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: - stack += 4; - code += i < 6 ? 1 : 3; + case MONO_TYPE_STRING: + case MONO_TYPE_R4: + add_general (&gr, stack_size, code_size, TRUE); break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_SZARRAY: + add_general (&gr, stack_size, code_size, TRUE); + *code_size += 4; + case MONO_TYPE_VALUETYPE: { + gint size; if (sig->params[i]->data.klass->enumtype) { simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if (mono_class_value_size (sig->params[i]->data.klass, NULL) != 4) - g_error ("can only marshal enums, not generic structures (size: %d)", - mono_class_value_size (sig->params[i]->data.klass, NULL)); - stack += 4; - code += i < 6 ? 1 : 3; - break; + size = mono_class_value_size (sig->params[i]->data.klass, NULL); + if (size != 4) { + *use_memcpy = TRUE; + *code_size += 8*4; + *stack_size += (size+3)&(~3); + if (gr > OUT_REGS) { + *code_size += 4; + *stack_size += 4; + } else { + add_general (&gr, stack_size, code_size, TRUE); + *code_size += 4; + } + break; + } + } case MONO_TYPE_I8: - stack += 8; - code += i < 6 ? 2 : 3; - break; case MONO_TYPE_R8: - stack += 8; - code += i < 6 ? 2 : 3; + add_general (&gr, stack_size, code_size, FALSE); break; default: g_error ("Can't trampoline 0x%x", sig->params[i]->type); @@ -175,7 +131,7 @@ calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_s /* function return value */ if (sig->ret->byref) { - code += 2; + *code_size += 8; } else { simpletype = sig->ret->type; enum_retvalue: @@ -198,17 +154,17 @@ calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_s case MONO_TYPE_R8: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: - code += 2; + *code_size += 8; break; case MONO_TYPE_I8: - code += 3; + *code_size += 12; break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } - code += 2; + *code_size += 8; break; case MONO_TYPE_VOID: break; @@ -217,231 +173,321 @@ calculate_sizes (MonoMethodSignature *sig, guint32 *local_size, guint32 *stack_s } } -#define STACKALIGN(x) (((x) + 15) & (~15)) -#define MINFRAME ((16 + 1 + 6) * 4) /* minimum size stack frame, in bytes: - * 16 for registers, 1 for "hidden param", - * and 6 in which a callee can store it's - * arguments. - */ + if (*use_memcpy) { + *stack_size += 8; + *code_size += 24; + if (sig->hasthis) { + *stack_size += 4; + *code_size += 4; + } + } - stack += MINFRAME + (local * 4); + *stack_size = (*stack_size + 15) & (~15); +} -#ifdef DEBUG_SPARC_TRAMP - fprintf (stderr, "\tstack size: %d (%d)\n\tcode size: %d\n", - STACKALIGN(stack), stack, code); -#endif +static inline guint32 * +emit_epilog (guint32 *p, MonoMethodSignature *sig, guint stack_size) +{ + /* + * Standard epilog. + * 8 may be 12 when returning structures (to skip unimp opcode). 
+ */ + sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); + sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); - *local_size = local; - *stack_size = STACKALIGN(stack); - *code_size = code; + return p; } -static MonoString * -mono_string_new_wrapper (const char *text) +static inline guint32 * +emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size) { - return text ? mono_string_new (mono_domain_get (), text) : NULL; + /* yes kids, it is this simple! */ + sparc_save_imm (p, sparc_sp, -stack_size, sparc_sp); + return p; } -MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +#define ARG_BASE sparc_l2 /* use local #2 */ +#define SAVE_4_IN_GENERIC_REGISTER \ + if (gr < OUT_REGS) { \ + sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); \ + gr++; \ + } else { \ + sparc_ld_imm (p, ARG_BASE, i*16, sparc_l0); \ + sparc_st_imm (p, sparc_l1, sparc_sp, stack_par_pos); \ + stack_par_pos += 4; \ + } + +#define SAVE_4_VAL_IN_GENERIC_REGISTER \ + if (gr < OUT_REGS) { \ + sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); \ + sparc_ld (p, sparc_o0 + gr, 0, sparc_o0 + gr); \ + gr++; \ + } else { \ + sparc_ld_imm (p, ARG_BASE, i*16, sparc_l0); \ + sparc_ld_imm (p, sparc_l1, i*16, sparc_l0); \ + sparc_st_imm (p, sparc_l1, sparc_sp, stack_par_pos); \ + stack_par_pos += 4; \ + } + +inline static guint32* +emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, + gboolean use_memcpy) { - guint32 *p, *code_buffer; - guint32 local_size, stack_size, code_size; - guint32 arg_pos, simpletype; - static GHashTable *cache = NULL; - int i, stringp, cur_out_reg; - MonoPIFunc res; + guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos; + guint32 simpletype; - if (!cache) - cache = g_hash_table_new ((GHashFunc)mono_signature_hash, - (GCompareFunc)mono_metadata_signature_equal); + fr = gr = 0; + stack_par_pos = MINIMAL_STACK_SIZE * 4; - if ((res = (MonoPIFunc)g_hash_table_lookup(cache, sig))) - return res; + sparc_st_imm (p, sparc_i1, sparc_sp, stack_size - 12); /* retval */ + + if (use_memcpy) { + sparc_st_imm (p, sparc_l4, sparc_sp, stack_size - 16); + sparc_st_imm (p, sparc_l5, sparc_sp, stack_size - 20); + sparc_mov_reg_reg (p, sparc_i0, sparc_l4); + sparc_mov_reg_reg (p, sparc_i3, sparc_l5); + } else { + sparc_mov_reg_reg (p, sparc_i3, sparc_l2); + sparc_mov_reg_reg (p, sparc_i0, sparc_l0); + } - calculate_sizes (sig, &local_size, &stack_size, &code_size, string_ctor); - - code_buffer = p = alloca (code_size * 4); - cur_out_reg = sparc_o0; - - /* Standard function prolog. */ - sparc_save_imm (p, sparc_sp, -stack_size, sparc_sp); -#if 0 - /* gcc seems to want to store %i0 through %i3 for some reason */ - sparc_st_imm (p, sparc_i0, sparc_fp, 68); - sparc_st_imm (p, sparc_i1, sparc_fp, 72); - sparc_st_imm (p, sparc_i2, sparc_fp, 76); - sparc_st_imm (p, sparc_i3, sparc_fp, 80); -#endif - if (sig->hasthis) { - sparc_mov_reg_reg (p, sparc_i2, cur_out_reg); - cur_out_reg++; + if (use_memcpy) { + sparc_st_imm (p, sparc_l6, sparc_sp, stack_size - 16); + sparc_mov_reg_reg (p, sparc_i2, sparc_l6); + } else + sparc_mov_reg_reg (p, sparc_i2, sparc_o0); + gr ++; } - - /* Push arguments in reverse order. 
*/ - stringp = 0; - for (i = 0; i < sig->param_count; i++) { - arg_pos = ARG_SIZE * i; - - if (sig->params[i]->byref) { -#ifdef DEBUG_SPARC_TRAMP - fprintf (stderr, "\tpushing params[%d] (byref):"\ - " type=%s;\n", i - ,mono_type(sig->params[i]->type)); -#endif + if (use_memcpy) { + cur_struct_pos = struct_pos = stack_par_pos; + for (i = 0; i < sig->param_count; i++) { + if (sig->params[i]->byref) + continue; + if (sig->params[i]->type == MONO_TYPE_VALUETYPE && + !sig->params[i]->data.klass->enumtype) { + gint size; + + size = mono_class_value_size (sig->params[i]->data.klass, NULL); + if (size != 4) { + /* need to call memcpy here */ + sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); + sparc_ld_imm (p, sparc_l5, i*16, sparc_o1); + sparc_or_imm (p, 0, sparc_g0, size & 0xffff, sparc_o2); + sparc_sethi (p, (guint32)memcpy, sparc_l0); + sparc_or_imm (p, 0, sparc_l0, (guint32)memcpy & 0x3ff, sparc_l0); + sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); + sparc_nop (p); + stack_par_pos += (size + 3) & (~3); + } + } + } - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - cur_out_reg++; - continue; + if (sig->hasthis) { + sparc_mov_reg_reg (p, sparc_l6, sparc_o0); + sparc_ld (p, sparc_sp, stack_size - 24, sparc_l6); } - - simpletype = sig->params[i]->type; -enum_marshal: -#ifdef DEBUG_SPARC_TRAMP - fprintf (stderr, "\tpushing params[%d]: type=%s;\n", - i, mono_type (simpletype)); -#endif + sparc_mov_reg_reg (p, sparc_l4, sparc_l0); + sparc_mov_reg_reg (p, sparc_l5, sparc_l2); + sparc_ld_imm (p, sparc_sp, stack_size - 16, sparc_l4); + sparc_ld_imm (p, sparc_sp, stack_size - 20, sparc_l5); + } + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + gint size = mono_class_native_size (klass, NULL); + if (size > 8) { + sparc_ld_imm (p, sparc_sp, stack_size - 12, + sparc_o0); + sparc_ld_imm (p, sparc_o0, 0, sparc_o0); + gr ++; + } else { + g_error ("FIXME: size > 8 not implemented"); + } + } + } + + for (i = 0; i < sig->param_count; i++) { + if (sig->params[i]->byref) { + SAVE_4_IN_GENERIC_REGISTER; + continue; + } + simpletype = sig->params[i]->type; + enum_calc_size: switch (simpletype) { case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_STRING: - case MONO_TYPE_R4: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - cur_out_reg++; + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + SAVE_4_IN_GENERIC_REGISTER; break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; + g_warning ("tramp: MONO_TYPE_VALUETYPE"); if (sig->params[i]->data.klass->enumtype) { - /* it's an enum value */ simpletype = sig->params[i]->data.klass->enum_basetype->type; - goto enum_marshal; + goto enum_calc_size; + } + size = mono_class_value_size (sig->params[i]->data.klass, NULL); + if (size == 4) { + g_warning("size is 4"); + SAVE_4_VAL_IN_GENERIC_REGISTER; } else { - /*sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg);*/ - sparc_ld_imm (p, sparc_i3, arg_pos, sparc_l0); - sparc_ld (p, 
sparc_l0, 0, cur_out_reg); - cur_out_reg++; + if (gr < OUT_REGS) { + sparc_add_imm (p, 0, sparc_sp, cur_struct_pos, sparc_o0 + gr); + gr ++; + } else { + sparc_ld_imm (p, sparc_sp, cur_struct_pos, sparc_l1); + sparc_st_imm (p, sparc_l1, stack_par_pos, sparc_sp); + } + cur_struct_pos += (size + 3) & (~3); } break; + } + case MONO_TYPE_I8: - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - cur_out_reg++; - sparc_ld_imm (p, sparc_i3, arg_pos + 4, cur_out_reg); - cur_out_reg++; - break; + case MONO_TYPE_R4: case MONO_TYPE_R8: - sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg); - cur_out_reg++; - sparc_ld_imm (p, sparc_i3, arg_pos + 4, cur_out_reg); - cur_out_reg++; + /* this will break in subtle ways... */ + if (gr < 5) { + if (gr & 1) + gr ++; + sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); + gr ++; + sparc_ld_imm (p, ARG_BASE, i*16 + 4, sparc_o0 + gr); + gr ++; + } else { + g_error("FIXME: i8r4r8 on stack"); + } break; default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); + g_error ("Can't trampoline 0x%x", sig->params[i]->type); } } - - /* call the function */ + + return p; +} + +static inline guint32 * +alloc_code_memory (guint code_size) +{ + guint32 *p; + + p = g_malloc(code_size); + + return p; +} + +static inline guint32 * +emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, + guint stack_size, gboolean string_ctor) +{ + guint32 simpletype; + + /* call "callme" */ sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite); sparc_nop (p); - - /* - * Handle retval. - * Small integer and pointer values are in EAX. - * Long integers are in EAX:EDX. - * FP values are on the FP stack. - */ + + /* get return value */ if (sig->ret->byref || string_ctor) { - sparc_st (p, sparc_o0, sparc_i1, 0); + sparc_ld_imm (p, sparc_sp, stack_size - 12, sparc_o5); + sparc_st_imm (p, sparc_o5, 0, sparc_i0); } else { simpletype = sig->ret->type; - -#ifdef DEBUG_SPARC_TRAMP - fprintf (stderr, "\tret type: %s;\n", mono_type (simpletype)); -#endif - -enum_retvalue: + enum_retval: switch (simpletype) { case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - sparc_stb (p, sparc_o0, sparc_i1, 0); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - sparc_sth (p, sparc_o0, sparc_i1, 0); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - case MONO_TYPE_PTR: - sparc_st (p, sparc_o0, sparc_i1, 0); - break; - case MONO_TYPE_R4: - sparc_stf (p, sparc_f0, sparc_i1, 0); - break; - case MONO_TYPE_R8: - sparc_stdf (p, sparc_f0, sparc_i1, 0); - break; - case MONO_TYPE_I8: - sparc_std (p, sparc_o0, sparc_i1, 0); - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - case MONO_TYPE_VOID: - break; - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); + case MONO_TYPE_I1: + case MONO_TYPE_U1: + sparc_stb (p, sparc_o0, sparc_i1, 0); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + sparc_sth (p, sparc_o0, sparc_i1, 0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + case MONO_TYPE_PTR: + sparc_st (p, sparc_o0, sparc_i1, 0); + break; + case MONO_TYPE_R4: + sparc_stf (p, sparc_f0, sparc_i1, 0); 
+ break; + case MONO_TYPE_R8: + sparc_stdf (p, sparc_f0, sparc_i1, 0); + break; + case MONO_TYPE_I8: + sparc_std (p, sparc_o0, sparc_i1, 0); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retval; + } + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); } } + return p; +} + +MonoPIFunc +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +{ + guint32 *p, *code_buffer; + guint stack_size, code_size, i; + gboolean use_memcpy = FALSE; + static GHashTable *cache = NULL; + MonoPIFunc res; + + if (!cache) + cache = g_hash_table_new ((GHashFunc)mono_signature_hash, + (GCompareFunc)mono_metadata_signature_equal); - /* - * Standard epilog. - * 8 may be 12 when returning structures (to skip unimp opcode). - */ - sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); - sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); + if ((res = (MonoPIFunc)g_hash_table_lookup(cache, sig))) + return res; + + calculate_sizes (sig, &stack_size, &code_size, + string_ctor, &use_memcpy); -#if DEBUG_SPARC_TRAMP + p = code_buffer = alloc_code_memory (code_size); + p = emit_prolog (p, sig, stack_size); + p = emit_save_parameters (p, sig, stack_size, use_memcpy); + p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); + p = emit_epilog (p, sig, stack_size); + { - unsigned char *inptr, *inend; - - inptr = (unsigned char *) code_buffer; - inend = (unsigned char *) p; - - fprintf (stderr,".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); - while (inptr < inend) { - fprintf (stderr, ".byte 0x%x\n", *inptr); - inptr++; - } - fflush (stderr); + guchar *cp; + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + for (cp = code_buffer; cp < p; cp++) { + printf (".byte 0x%x\n", *cp); + } } -#endif - - res = (MonoPIFunc)g_memdup (code_buffer, 4 * (p - code_buffer)); /* So here's the deal... * UltraSPARC will flush a whole cache line at a time @@ -450,11 +496,11 @@ enum_retvalue: */ for (i = 0; i < ((p - code_buffer)/2); i++) - flushi((res + (i*8))); + flushi((code_buffer + (i*8))); - g_hash_table_insert(cache, sig, res); + g_hash_table_insert(cache, sig, code_buffer); - return res; + return (MonoPIFunc) code_buffer; } void * @@ -475,47 +521,5 @@ mono_create_method_pointer (MonoMethod *method) return method->addr; } - sig = method->signature; - - code_buffer = p = alloca (1024); /* Ok, this might overflow. */ - - stack_size = STACKALIGN(((sig->param_count + 1) * 4) + MINFRAME); - - /* Prologue */ - /* SPARC rocks, 'nuff said */ - sparc_save_imm(p, sparc_sp, -stack_size, sparc_sp); - - /* Initialize the structure with zeros. GO GO GADGET G0! */ - sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), 0); - sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), 0); - sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), 0); - sparc_st(p, sparc_g0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), 0); - - /* set the method pointer */ - /* 32 bit runtime -- Any thoughts on doing sparc64? 
*/ - sparc_ld_imm(p, (guint32) method >> 16, 0, sparc_o0); - sparc_or_imm(p, 0, sparc_o0, (guint32) method & 0xffff, sparc_o0); - sparc_st(p, sparc_o0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)),0); - { - unsigned char *inptr, *inend; - - inptr = (unsigned char *) code_buffer; - inend = (unsigned char *) p; - - fprintf (stderr,".text\n.align 4\n.globl main\n.type main,function\nmain:\n"); - while (inptr < inend) { - fprintf (stderr, ".byte 0x%x\n", *inptr); - inptr++; - } - fflush (stderr); - } - return 0xdeadbeef; } - -MonoMethod* -mono_method_pointer_get (void *code) -{ - g_warning("mono_method_pointer_get: IMPLEMENT ME\n"); - return NULL; -} -- cgit v1.1 From a9d8f44092c7c313efae893ff64306dc92985110 Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Wed, 25 Sep 2002 01:52:30 +0000 Subject: arch/sparc/tramp.c: Fixed once again. Now works, mostly. io-layer/atomic.h: It's sparc on gcc/solaris, and __sparc__ on gcc/linux. had to add an #ifdef. svn path=/trunk/mono/; revision=7798 --- ChangeLog | 6 ++++ sparc/tramp.c | 105 ++++++++++++++++++++++------------------------------------ 2 files changed, 45 insertions(+), 66 deletions(-) diff --git a/ChangeLog b/ChangeLog index fe0176e..7c5a525 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2002-09-24 Mark Crichton + + * sparc/tramp.c: Works as well as the old code did. Cleanup is + finished. The framework now for adding more type support is in, + and should be a *lot* cleaner. + 2002-09-22 Mark Crichton * sparc/tramp.c: Completely broke trampolining on SPARC processors. diff --git a/sparc/tramp.c b/sparc/tramp.c index 4d08fbc..1c93c7a 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -22,13 +22,15 @@ #define ARG_SIZE sizeof (stackval) #define PROLOG_INS 1 -#define CALL_INS 3 /* Max 3. 2 for the load and 1 for the call */ +#define CALL_INS 2 /* Max 2. 1 for the jmpl and 1 for the nop */ #define EPILOG_INS 2 #define MINIMAL_STACK_SIZE 23 #define FLOAT_REGS 32 #define OUT_REGS 6 #define LOCAL_REGS 8 +#define NOT_IMPL(x) g_error("FIXME: %s", x); + /* Some assembly... 
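 * (note, not in the original: the flushi macro below wraps the SPARC
 *  `flush' instruction, which invalidates the I-cache line holding
 *  its operand -- necessary after writing freshly generated
 *  instructions into a heap buffer that is about to be executed)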
*/ #define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") @@ -38,7 +40,7 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) if (simple) { if (*gr >= OUT_REGS) { *stack_size += 4; - *code_size += 8; + *code_size += 12; } else { *code_size += 4; } @@ -94,11 +96,9 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_R4: - add_general (&gr, stack_size, code_size, TRUE); - break; case MONO_TYPE_SZARRAY: add_general (&gr, stack_size, code_size, TRUE); - *code_size += 4; + break; case MONO_TYPE_VALUETYPE: { gint size; if (sig->params[i]->data.klass->enumtype) { @@ -107,16 +107,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } size = mono_class_value_size (sig->params[i]->data.klass, NULL); if (size != 4) { - *use_memcpy = TRUE; - *code_size += 8*4; - *stack_size += (size+3)&(~3); - if (gr > OUT_REGS) { - *code_size += 4; - *stack_size += 4; - } else { - add_general (&gr, stack_size, code_size, TRUE); - *code_size += 4; - } + NOT_IMPL("size != 4") break; } } @@ -206,30 +197,25 @@ emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size) return p; } -#define ARG_BASE sparc_l2 /* use local #2 */ +#define ARG_BASE sparc_i3 /* pointer to args in i3 */ #define SAVE_4_IN_GENERIC_REGISTER \ - if (gr < OUT_REGS) { \ - sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); \ - gr++; \ - } else { \ - sparc_ld_imm (p, ARG_BASE, i*16, sparc_l0); \ - sparc_st_imm (p, sparc_l1, sparc_sp, stack_par_pos); \ - stack_par_pos += 4; \ - } + if (gr < OUT_REGS) { \ + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \ + gr++; \ + } else { \ + g_error("FIXME: SAVE_4_IN_GENERIC_REGISTER"); \ + } #define SAVE_4_VAL_IN_GENERIC_REGISTER \ - if (gr < OUT_REGS) { \ - sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); \ - sparc_ld (p, sparc_o0 + gr, 0, sparc_o0 + gr); \ - gr++; \ - } else { \ - sparc_ld_imm (p, ARG_BASE, i*16, sparc_l0); \ - sparc_ld_imm (p, sparc_l1, i*16, sparc_l0); \ - sparc_st_imm (p, sparc_l1, sparc_sp, stack_par_pos); \ - stack_par_pos += 4; \ - } - -inline static guint32* + if (gr < OUT_REGS) { \ + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ + sparc_ld (p, sparc_l0, 0, sparc_o0 + gr); \ + gr++; \ + } else { \ + g_error("FIXME: SAVE_4_VAL_IN_GENERIC_REGISTER"); \ + } + +static inline guint32* emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, gboolean use_memcpy) { @@ -239,27 +225,15 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, fr = gr = 0; stack_par_pos = MINIMAL_STACK_SIZE * 4; - sparc_st_imm (p, sparc_i1, sparc_sp, stack_size - 12); /* retval */ - - if (use_memcpy) { - sparc_st_imm (p, sparc_l4, sparc_sp, stack_size - 16); - sparc_st_imm (p, sparc_l5, sparc_sp, stack_size - 20); - sparc_mov_reg_reg (p, sparc_i0, sparc_l4); - sparc_mov_reg_reg (p, sparc_i3, sparc_l5); - } else { - sparc_mov_reg_reg (p, sparc_i3, sparc_l2); - sparc_mov_reg_reg (p, sparc_i0, sparc_l0); - } - if (sig->hasthis) { if (use_memcpy) { - sparc_st_imm (p, sparc_l6, sparc_sp, stack_size - 16); - sparc_mov_reg_reg (p, sparc_i2, sparc_l6); + NOT_IMPL("emit_save_parameters: use_memcpy #1") } else sparc_mov_reg_reg (p, sparc_i2, sparc_o0); gr ++; } +#if 0 if (use_memcpy) { cur_struct_pos = struct_pos = stack_par_pos; for (i = 0; i < sig->param_count; i++) { @@ -310,6 +284,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, } } } +#endif for (i = 0; i 
< sig->param_count; i++) { if (sig->params[i]->byref) { @@ -327,6 +302,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: + case MONO_TYPE_R4: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: @@ -338,41 +314,39 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, break; case MONO_TYPE_VALUETYPE: { gint size; - g_warning ("tramp: MONO_TYPE_VALUETYPE"); if (sig->params[i]->data.klass->enumtype) { simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } size = mono_class_value_size (sig->params[i]->data.klass, NULL); if (size == 4) { - g_warning("size is 4"); SAVE_4_VAL_IN_GENERIC_REGISTER; } else { - if (gr < OUT_REGS) { - sparc_add_imm (p, 0, sparc_sp, cur_struct_pos, sparc_o0 + gr); - gr ++; - } else { - sparc_ld_imm (p, sparc_sp, cur_struct_pos, sparc_l1); - sparc_st_imm (p, sparc_l1, stack_par_pos, sparc_sp); - } - cur_struct_pos += (size + 3) & (~3); + NOT_IMPL("emit_save_parameters: size != 4") } break; } case MONO_TYPE_I8: - case MONO_TYPE_R4: case MONO_TYPE_R8: /* this will break in subtle ways... */ if (gr < 5) { if (gr & 1) gr ++; - sparc_ld_imm (p, ARG_BASE, i*16, sparc_o0 + gr); + sparc_ld_imm (p, ARG_BASE, ARG_SIZE, sparc_o0 + gr); gr ++; - sparc_ld_imm (p, ARG_BASE, i*16 + 4, sparc_o0 + gr); + + if (gr >= OUT_REGS) { + NOT_IMPL("split reg/stack") + break; + } else { + sparc_ld_imm (p, ARG_BASE, + ARG_SIZE + 4, + sparc_o0 + gr); + } gr ++; } else { - g_error("FIXME: i8r4r8 on stack"); + NOT_IMPL("FIXME: I8/R8 on stack"); } break; default: @@ -405,8 +379,7 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, /* get return value */ if (sig->ret->byref || string_ctor) { - sparc_ld_imm (p, sparc_sp, stack_size - 12, sparc_o5); - sparc_st_imm (p, sparc_o5, 0, sparc_i0); + sparc_st (p, sparc_o0, sparc_i1, 0); } else { simpletype = sig->ret->type; enum_retval: -- cgit v1.1 From e5d299dd18e820d33cf1d74e0e2de53e163cc07b Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Wed, 25 Sep 2002 04:50:10 +0000 Subject: Stupid off-by-one error fixed. The problem was that I incremented gr as if we were on a PPC box. Sparc doesn't need such "alignment" of the registers. svn path=/trunk/mono/; revision=7800 --- ChangeLog | 5 ++ sparc/tramp.c | 244 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 240 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index 7c5a525..237d081 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-09-25 Mark Crichton + + * sparc/tramp.c: Off by one error. Whoops. Trampoline code should + now work properly. + 2002-09-24 Mark Crichton * sparc/tramp.c: Works as well as the old code did. Cleanup is diff --git a/sparc/tramp.c b/sparc/tramp.c index 1c93c7a..162a99b 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -18,6 +18,7 @@ #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" +#include "mono/metadata/debug-helpers.h" #define ARG_SIZE sizeof (stackval) @@ -34,6 +35,29 @@ /* Some assembly... 
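 * (note, not in the original: debug-helpers.h, added to the includes
 *  above, supplies mono_type_get_desc, which the sig_to_name helper
 *  below uses to build a readable description of a method signature)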
*/ #define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") +static char* +sig_to_name (MonoMethodSignature *sig, const char *prefix) +{ + int i; + char *result; + GString *res = g_string_new (""); + + if (prefix) { + g_string_append (res, prefix); + g_string_append_c (res, '_'); + } + + mono_type_get_desc (res, sig->ret, TRUE); + + for (i = 0; i < sig->param_count; ++i) { + g_string_append_c (res, '_'); + mono_type_get_desc (res, sig->params [i], TRUE); + } + result = res->str; + g_string_free (res, FALSE); + return result; +} + static void add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) { @@ -51,8 +75,6 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) } else { *code_size += 16; } - if ((*gr) && 1) - (*gr)++; (*gr)++; } (*gr)++; @@ -221,6 +243,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, { guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos; guint32 simpletype; + GString *res = g_string_new(""); fr = gr = 0; stack_par_pos = MINIMAL_STACK_SIZE * 4; @@ -286,6 +309,8 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, } #endif + fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)); + for (i = 0; i < sig->param_count; i++) { if (sig->params[i]->byref) { SAVE_4_IN_GENERIC_REGISTER; @@ -331,8 +356,6 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_R8: /* this will break in subtle ways... */ if (gr < 5) { - if (gr & 1) - gr ++; sparc_ld_imm (p, ARG_BASE, ARG_SIZE, sparc_o0 + gr); gr ++; @@ -454,13 +477,15 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); p = emit_epilog (p, sig, stack_size); +#if 1 { guchar *cp; - printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + fprintf (stderr,".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); for (cp = code_buffer; cp < p; cp++) { - printf (".byte 0x%x\n", *cp); + fprintf (stderr, ".byte 0x%x\n", *cp); } } +#endif /* So here's the deal... 
* UltraSPARC will flush a whole cache line at a time @@ -476,13 +501,18 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) return (MonoPIFunc) code_buffer; } +#define MINV_POS (MINIMAL_STACK_SIZE * 4) void * mono_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; - guint32 stack_size; - unsigned char *p, *code_buffer; + guint stack_size, code_size, stackval_arg_pos, local_pos; + guint i, local_start, reg_param, stack_param, this_flag, cpos, vt_cur; + guint align = 0; + guint32 *p, *code_buffer; + gint *vtbuf; + gint32 simpletype; if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { ji = g_new0 (MonoJitInfo, 1); @@ -494,5 +524,201 @@ mono_create_method_pointer (MonoMethod *method) return method->addr; } - return 0xdeadbeef; + code_size = 1024; + stack_size = 1024; + stack_param = 0; + + sig = method->signature; + + p = code_buffer = g_malloc (code_size); + + emit_prolog (p, sig, stack_size); + + /* fill MonoInvocation */ + sparc_st (p, sparc_g0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); + sparc_st (p, sparc_g0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); + sparc_st (p, sparc_g0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child))); + sparc_st (p, sparc_g0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); + + sparc_set (p, (guint32)method, sparc_l0); + sparc_st (p, sparc_l0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); + + local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + + (sig->param_count + 1) * sizeof (stackval); + + if (sig->hasthis) { + sparc_st (p, sparc_i0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + reg_param = 1; + } else if (sig->param_count) { + sparc_st (p, sparc_i0, sparc_sp, local_pos); + local_pos += 4; + reg_param = 0; + } + + this_flag = (sig->hasthis ? 1 : 0); + + if (sig->param_count) { + gint save_count = MIN (OUT_REGS, sig->param_count - 1); + for (i = reg_param; i < save_count; i++) { + sparc_st (p, sparc_i1, sparc_sp, local_pos); + local_pos += 4; + } + } + + /* prepare space for valuetypes */ + vt_cur = local_pos; + vtbuf = alloca (sizeof(int)*sig->param_count); + cpos = 0; + for (i = 0; i < sig->param_count; i++) { + MonoType *type = sig->params [i]; + vtbuf [i] = -1; + if (type->type == MONO_TYPE_VALUETYPE) { + MonoClass *klass = type->data.klass; + gint size; + + if (klass->enumtype) + continue; + size = mono_class_native_size (klass, &align); + cpos += align - 1; + cpos &= ~(align - 1); + vtbuf [i] = cpos; + cpos += size; + } + } + cpos += 3; + cpos &= ~3; + + local_pos += cpos; + + /* set MonoInvocation::stack_args */ + stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); + sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); + sparc_st (p, sparc_l0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); + + /* add stackval arguments */ + /* something is bizzare here... 
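 * (note, not in the original: the loop below points %o2 at each raw
 *  argument -- the %i-register copies spilled at local_start first,
 *  then the caller's stack area -- points %o1 at the matching
 *  stackval slot, and calls stackval_from_data for the conversion)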
*/ + for (i=0; i < sig->param_count; i++) { + if (reg_param < OUT_REGS) { + sparc_add_imm (p, 0, sparc_sp, + local_start + (reg_param - this_flag)*4, + sparc_o2); + reg_param++; + } else { + sparc_add_imm (p, 0, sparc_sp, + stack_size + 8 + stack_param, sparc_o2); + stack_param++; + } + + if (vtbuf[i] >= 0) { + sparc_add_imm (p, 0, sparc_sp, vt_cur, sparc_o1); + sparc_st (p, sparc_o1, sparc_sp, stackval_arg_pos); + sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, + sparc_o1); + sparc_ld (p, sparc_o2, 0, sparc_o2); + vt_cur += vtbuf[i]; + } else { + sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, + sparc_o1); + } + + sparc_set (p, sparc_o0, (guint32)sig->params[i]); + + /* YOU make the CALL! */ + sparc_set (p, sparc_l0, (guint32)stackval_from_data); + sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); + sparc_nop (p); + + if (sig->pinvoke) + stackval_arg_pos += 4 * + mono_type_native_stack_size (sig->params[i], + &align); + else + stackval_arg_pos += 4 * + mono_type_stack_size (sig->params[i], &align); + } + + /* return value storage */ + if (sig->param_count) { + sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); + } + + sparc_st (p, sparc_g0, sparc_sp, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); + + /* call ves_exec_method */ + sparc_add_imm (p, 0, sparc_o0, MINV_POS, sparc_sp); + sparc_set (p, sparc_l0, (guint32)ves_exec_method); + sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); + sparc_nop (p); + + /* move retval from stackval to proper place (r3/r4/...) */ + if (sig->ret->byref) { + sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0 ); + } else { + enum_retvalue: + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + sparc_ldub (p, sparc_sp, stackval_arg_pos, sparc_i0); + break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + sparc_lduh (p, sparc_sp, stackval_arg_pos, sparc_i0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0); + break; + case MONO_TYPE_I8: + sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_ld (p, sparc_sp, stackval_arg_pos + 4, sparc_i1); + break; + case MONO_TYPE_R4: + sparc_ldf (p, sparc_sp, stackval_arg_pos, sparc_f0); + break; + case MONO_TYPE_R8: + sparc_lddf (p, sparc_sp, stackval_arg_pos, sparc_f0); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + NOT_IMPL("value type as ret val from delegate"); + break; + default: + g_error ("Type 0x%x not handled yet in thunk creation", + sig->ret->type); + break; + } + } + + emit_epilog (p, sig, stack_size); + + for (i = 0; i < ((p - code_buffer)/2); i++) + flushi((code_buffer + (i*8))); + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = code_buffer; + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; } -- cgit v1.1 From b6d66c3ac8ae39c47b99dd8b8a7813e6f60c47e7 Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Thu, 3 Oct 2002 15:30:05 +0000 Subject: Changes to tramp.c. Pass more tests. 
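(Annotation, not part of this commit: for readers following the trampoline interface, a sketch of how the generated code is meant to be invoked. The helper and argument names are illustrative, and the MonoPIFunc prototype is inferred from the prolog's use of %i0 = native function, %i1 = return buffer, %i2 = `this', %i3 = stackval array.)

#include "mono/metadata/class.h"
#include "mono/interpreter/interp.h"

/* hypothetical caller -- not from this patch */
static void
invoke_native (MonoMethodSignature *sig, void (*callme) (),
	       stackval *args, stackval *retval)
{
	MonoPIFunc thunk = mono_create_trampoline (sig, FALSE);

	/* these land in %i0..%i3 of the trampoline's register window */
	thunk (callme, retval, /* this_obj */ NULL, args);
}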
svn path=/trunk/mono/; revision=7966 --- ChangeLog | 5 +++ sparc/tramp.c | 133 ++++++++++++++++++++++++++++++++++------------------------ 2 files changed, 83 insertions(+), 55 deletions(-) diff --git a/ChangeLog b/ChangeLog index 237d081..516531f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2002-10-02 Mark Crichton + + * sparc/tramp.c: More cleanup of the trampoline code. Still some + problems with it w.r.t. delegates. + 2002-09-25 Mark Crichton * sparc/tramp.c: Off by one error. Whoops. Trampoline code should diff --git a/sparc/tramp.c b/sparc/tramp.c index 162a99b..cd1c23b 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -129,9 +129,19 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } size = mono_class_value_size (sig->params[i]->data.klass, NULL); if (size != 4) { - NOT_IMPL("size != 4") - break; + fprintf(stderr, "copy %d byte struct on stack\n", size); + *use_memcpy = TRUE; + *code_size += 8*4; + *stack_size += (size + 3) & (~3); + if (gr > OUT_REGS) { + *code_size += 4; + *stack_size += 4; + } + } else { + add_general (&gr, stack_size, code_size, TRUE); + *code_size += 4; } + break; } case MONO_TYPE_I8: case MONO_TYPE_R8: @@ -143,7 +153,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } /* function return value */ - if (sig->ret->byref) { + if (sig->ret->byref || string_ctor) { *code_size += 8; } else { simpletype = sig->ret->type; @@ -230,8 +240,9 @@ emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size) #define SAVE_4_VAL_IN_GENERIC_REGISTER \ if (gr < OUT_REGS) { \ + g_warning("DOCTOR! LOOK OUT: %p", p); \ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ - sparc_ld (p, sparc_l0, 0, sparc_o0 + gr); \ + sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr); \ gr++; \ } else { \ g_error("FIXME: SAVE_4_VAL_IN_GENERIC_REGISTER"); \ @@ -243,20 +254,18 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, { guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos; guint32 simpletype; - GString *res = g_string_new(""); fr = gr = 0; stack_par_pos = MINIMAL_STACK_SIZE * 4; if (sig->hasthis) { if (use_memcpy) { - NOT_IMPL("emit_save_parameters: use_memcpy #1") + /* we don't need to save a thing. 
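 *  (added note: %i2 keeps the `this' pointer alive across the memcpy
 *   calls below anyway, since the SPARC register window preserves
 *   the %i registers over outgoing calls)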
*/ } else sparc_mov_reg_reg (p, sparc_i2, sparc_o0); gr ++; } -#if 0 if (use_memcpy) { cur_struct_pos = struct_pos = stack_par_pos; for (i = 0; i < sig->param_count; i++) { @@ -270,26 +279,15 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (size != 4) { /* need to call memcpy here */ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); - sparc_ld_imm (p, sparc_l5, i*16, sparc_o1); - sparc_or_imm (p, 0, sparc_g0, size & 0xffff, sparc_o2); - sparc_sethi (p, (guint32)memcpy, sparc_l0); - sparc_or_imm (p, 0, sparc_l0, (guint32)memcpy & 0x3ff, sparc_l0); + sparc_ld_imm (p, sparc_i3, i*16, sparc_o1); + sparc_set (p, (guint32)size, sparc_o2); + sparc_set (p, (guint32)memcpy, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); - stack_par_pos += (size + 3) & (~3); + stack_par_pos += (size*2 + 3) & (~3); } } } - - if (sig->hasthis) { - sparc_mov_reg_reg (p, sparc_l6, sparc_o0); - sparc_ld (p, sparc_sp, stack_size - 24, sparc_l6); - } - - sparc_mov_reg_reg (p, sparc_l4, sparc_l0); - sparc_mov_reg_reg (p, sparc_l5, sparc_l2); - sparc_ld_imm (p, sparc_sp, stack_size - 16, sparc_l4); - sparc_ld_imm (p, sparc_sp, stack_size - 20, sparc_l5); } if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { @@ -297,17 +295,17 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (!klass->enumtype) { gint size = mono_class_native_size (klass, NULL); + fprintf(stderr, "retval value type size: %d\n", size); if (size > 8) { sparc_ld_imm (p, sparc_sp, stack_size - 12, sparc_o0); sparc_ld_imm (p, sparc_o0, 0, sparc_o0); gr ++; } else { - g_error ("FIXME: size > 8 not implemented"); + g_error ("FIXME: size <= 8 not implemented"); } } } -#endif fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)); @@ -347,7 +345,19 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (size == 4) { SAVE_4_VAL_IN_GENERIC_REGISTER; } else { - NOT_IMPL("emit_save_parameters: size != 4") + if (gr < OUT_REGS) { + sparc_add_imm (p, 0, sparc_sp, + cur_struct_pos, sparc_o0 + gr); + gr ++; + } else { + sparc_ld_imm (p, sparc_sp, + cur_struct_pos, + sparc_l1); + sparc_st_imm (p, sparc_l1, + sparc_sp, + stack_par_pos); + } + cur_struct_pos += (size + 3) & (~3); } break; } @@ -356,7 +366,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_R8: /* this will break in subtle ways... 
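 *  (added note: the I8/R8 is moved as two independent 32-bit word
 *   loads into consecutive %o registers -- the V8 convention of
 *   passing a double like a pair of ints -- and the case where the
 *   pair straddles the last register and the stack is still NOT_IMPL)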
*/ if (gr < 5) { - sparc_ld_imm (p, ARG_BASE, ARG_SIZE, sparc_o0 + gr); + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); gr ++; if (gr >= OUT_REGS) { @@ -364,7 +374,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, break; } else { sparc_ld_imm (p, ARG_BASE, - ARG_SIZE + 4, + (i*ARG_SIZE) + 4, sparc_o0 + gr); } gr ++; @@ -477,7 +487,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); p = emit_epilog (p, sig, stack_size); -#if 1 +#if 0 { guchar *cp; fprintf (stderr,".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); @@ -498,7 +508,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) g_hash_table_insert(cache, sig, code_buffer); - return (MonoPIFunc) code_buffer; + return (MonoPIFunc)code_buffer; } #define MINV_POS (MINIMAL_STACK_SIZE * 4) @@ -532,31 +542,33 @@ mono_create_method_pointer (MonoMethod *method) p = code_buffer = g_malloc (code_size); - emit_prolog (p, sig, stack_size); + fprintf(stderr, "Delegate [start emiting] %s\n", method->name); + + p = emit_prolog (p, sig, stack_size); /* fill MonoInvocation */ - sparc_st (p, sparc_g0, sparc_sp, + sparc_st_imm (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); - sparc_st (p, sparc_g0, sparc_sp, + sparc_st_imm (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); - sparc_st (p, sparc_g0, sparc_sp, + sparc_st_imm (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child))); - sparc_st (p, sparc_g0, sparc_sp, + sparc_st_imm (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); sparc_set (p, (guint32)method, sparc_l0); - sparc_st (p, sparc_l0, sparc_sp, + sparc_st_imm (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval); if (sig->hasthis) { - sparc_st (p, sparc_i0, sparc_sp, + sparc_st_imm (p, sparc_i0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); reg_param = 1; } else if (sig->param_count) { - sparc_st (p, sparc_i0, sparc_sp, local_pos); + sparc_st_imm (p, sparc_i0, sparc_sp, local_pos); local_pos += 4; reg_param = 0; } @@ -566,7 +578,7 @@ mono_create_method_pointer (MonoMethod *method) if (sig->param_count) { gint save_count = MIN (OUT_REGS, sig->param_count - 1); for (i = reg_param; i < save_count; i++) { - sparc_st (p, sparc_i1, sparc_sp, local_pos); + sparc_st_imm (p, sparc_i1 + i, sparc_sp, local_pos); local_pos += 4; } } @@ -599,7 +611,7 @@ mono_create_method_pointer (MonoMethod *method) /* set MonoInvocation::stack_args */ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); - sparc_st (p, sparc_l0, sparc_sp, + sparc_st_imm (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); /* add stackval arguments */ @@ -618,7 +630,7 @@ mono_create_method_pointer (MonoMethod *method) if (vtbuf[i] >= 0) { sparc_add_imm (p, 0, sparc_sp, vt_cur, sparc_o1); - sparc_st (p, sparc_o1, sparc_sp, stackval_arg_pos); + sparc_st_imm (p, sparc_o1, sparc_sp, stackval_arg_pos); sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_o1); sparc_ld (p, sparc_o2, 0, sparc_o2); @@ -628,10 +640,11 @@ mono_create_method_pointer (MonoMethod *method) sparc_o1); } - sparc_set (p, sparc_o0, (guint32)sig->params[i]); + sparc_set (p, (guint32)sig->params[i], sparc_o0); + 
sparc_set (p, (guint32)sig->pinvoke, sparc_o3); /* YOU make the CALL! */ - sparc_set (p, sparc_l0, (guint32)stackval_from_data); + sparc_set (p, (guint32)stackval_from_data, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); @@ -649,18 +662,18 @@ mono_create_method_pointer (MonoMethod *method) sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); } - sparc_st (p, sparc_g0, sparc_sp, + sparc_st_imm (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); /* call ves_exec_method */ - sparc_add_imm (p, 0, sparc_o0, MINV_POS, sparc_sp); - sparc_set (p, sparc_l0, (guint32)ves_exec_method); + sparc_add_imm (p, 0, sparc_sp, MINV_POS, sparc_o0); + sparc_set (p, (guint32)ves_exec_method, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); /* move retval from stackval to proper place (r3/r4/...) */ if (sig->ret->byref) { - sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0 ); + sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0 ); } else { enum_retvalue: switch (sig->ret->type) { @@ -669,11 +682,11 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - sparc_ldub (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_ldub_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); break; case MONO_TYPE_I2: case MONO_TYPE_U2: - sparc_lduh (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_lduh_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); break; case MONO_TYPE_I4: case MONO_TYPE_U4: @@ -682,17 +695,17 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: - sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); break; case MONO_TYPE_I8: - sparc_ld (p, sparc_sp, stackval_arg_pos, sparc_i0); - sparc_ld (p, sparc_sp, stackval_arg_pos + 4, sparc_i1); + sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_ld_imm (p, sparc_sp, stackval_arg_pos + 4, sparc_i1); break; case MONO_TYPE_R4: - sparc_ldf (p, sparc_sp, stackval_arg_pos, sparc_f0); + sparc_ldf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0); break; case MONO_TYPE_R8: - sparc_lddf (p, sparc_sp, stackval_arg_pos, sparc_f0); + sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0); break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { @@ -708,7 +721,7 @@ mono_create_method_pointer (MonoMethod *method) } } - emit_epilog (p, sig, stack_size); + p = emit_epilog (p, sig, stack_size); for (i = 0; i < ((p - code_buffer)/2); i++) flushi((code_buffer + (i*8))); @@ -719,6 +732,16 @@ mono_create_method_pointer (MonoMethod *method) ji->code_start = code_buffer; mono_jit_info_table_add (mono_root_domain, ji); - + +#if 0 + { + guchar *cp; + fprintf (stderr,".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + for (cp = code_buffer; cp < p; cp++) { + fprintf (stderr, ".byte 0x%x\n", *cp); + } + } +#endif + return ji->code_start; } -- cgit v1.1 From 457b666522f839e5e94e5fdda2284255b26d79a2 Mon Sep 17 00:00:00 2001 From: Mark Crichton Date: Mon, 7 Oct 2002 03:36:50 +0000 Subject: Fix some minor trampoline nags. Now down to 15 failed tests. Delegate code still broken, if anyone wants to help fix it. 
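(Annotation, not part of this commit: the stack offsets the new spill code in this patch relies on follow from MINIMAL_STACK_SIZE. A SPARC V8 frame must reserve a 16-word register-window save area, one word for the hidden aggregate-return pointer and six words where %o0-%o5 can be homed, i.e. 23 words; spilled arguments therefore start at %sp + 92, exactly where stack_par_pos begins. A self-contained sanity check of that arithmetic:)

#include <assert.h>

/* illustrative only -- mirrors MINIMAL_STACK_SIZE in sparc/tramp.c */
#define WINDOW_SAVE_WORDS 16	/* %l0-%l7 and %i0-%i7 spill slots */
#define STRUCT_RET_WORDS   1	/* hidden aggregate-return pointer */
#define OUT_ARG_WORDS      6	/* home slots for %o0-%o5 */

int
main (void)
{
	/* 23 words == MINIMAL_STACK_SIZE; the first stack argument
	 * lands at %sp + 92, the start value of stack_par_pos */
	assert (WINDOW_SAVE_WORDS + STRUCT_RET_WORDS + OUT_ARG_WORDS == 23);
	assert ((WINDOW_SAVE_WORDS + STRUCT_RET_WORDS + OUT_ARG_WORDS) * 4 == 92);
	return 0;
}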
svn path=/trunk/mono/; revision=8041 --- sparc/tramp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/sparc/tramp.c b/sparc/tramp.c index cd1c23b..94e6a5f 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -235,17 +235,21 @@ emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size) sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \ gr++; \ } else { \ - g_error("FIXME: SAVE_4_IN_GENERIC_REGISTER"); \ + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ + sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); \ + stack_par_pos += 4; \ } #define SAVE_4_VAL_IN_GENERIC_REGISTER \ if (gr < OUT_REGS) { \ - g_warning("DOCTOR! LOOK OUT: %p", p); \ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr); \ gr++; \ } else { \ - g_error("FIXME: SAVE_4_VAL_IN_GENERIC_REGISTER"); \ + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ + sparc_ld_imm (p, sparc_l0, 0, sparc_l0); \ + sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); \ + stack_par_pos += 4; \ } static inline guint32* -- cgit v1.1 From b669ce7ac5106466cc6d57e9163ca5d6d80611aa Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 24 Oct 2002 19:27:13 +0000 Subject: s390 support from Neale Ferguson . svn path=/trunk/mono/; revision=8521 --- Makefile.am | 2 +- s390/Makefile.am | 7 + s390/s390-codegen.h | 82 ++++ s390/tramp.c | 1223 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1313 insertions(+), 1 deletion(-) create mode 100644 s390/Makefile.am create mode 100644 s390/s390-codegen.h create mode 100644 s390/tramp.c diff --git a/Makefile.am b/Makefile.am index 5f14ee9..1a5ad6a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm +DIST_SUBDIRS = x86 ppc sparc arm s390 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/s390/Makefile.am b/s390/Makefile.am new file mode 100644 index 0000000..1c62a88 --- /dev/null +++ b/s390/Makefile.am @@ -0,0 +1,7 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-s390.la + +libmonoarch_s390_la_SOURCES = tramp.c s390-codegen.h + diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h new file mode 100644 index 0000000..5f6c255 --- /dev/null +++ b/s390/s390-codegen.h @@ -0,0 +1,82 @@ +/* + Copyright (C) 2001 Radek Doulik +*/ + +#ifndef S390_H +#define S390_H +#include +#include + +typedef enum { + s390_r0 = 0, + s390_r1, + s390_r2, + s390_r3, + s390_r4, + s390_r5, + s390_r6, + s390_r7, + s390_r8, + s390_r9, + s390_r10, + s390_r11, + s390_r12, + s390_r13, + s390_r14, + s390_r15, +} S390IntRegister; + +typedef enum { + s390_f0 = 0, + s390_f1, + s390_f2, + s390_f3, + s390_f4, + s390_f5, + s390_f6, + s390_f7, + s390_f8, + s390_f9, + s390_f10, + s390_f11, + s390_f12, + s390_f13, + s390_f14, + s390_f15, +} S390FloatRegister; + +typedef enum { + s390_fpc = 256, +} S390SpecialRegister; + +#define s390_word(addr, value) *((guint32 *) addr) = (guint32) (value); ((guint32 *) addr)++ +#define s390_emit16(c, x) *((guint16 *) c) = x; ((guint16 *) c)++ +#define s390_emit32(c, x) *((guint32 *) c) = x; ((guint32 *) c)++ +#define s390_basr(code, r1, r2) s390_emit16 (code, (13 << 8 | (r1) << 4 | (r2))) +#define s390_bras(code, r, o) s390_emit32 (code, (167 << 24 | (r) << 20 | 5 << 16 | (o))) +#define s390_ahi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 10 << 16 | ((v) & 0xffff))) +#define s390_br(code, r) s390_emit16 (code, (7 << 8 | 15 << 4 | (r))) +#define s390_nr(code, r1, r2) s390_emit16 (code, (20 << 8 | 
(r1) << 4 | (r2))) +#define s390_lr(code, r1, r2) s390_emit16 (code, (24 << 8 | (r1) << 4 | (r2))) +#define s390_l(code, r, b, d) s390_emit32 (code, (88 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_lm(code, r1, r2, b, d) s390_emit32 (code, (152 << 24 | (r1) << 20 | (r2) << 16 \ + | (b) << 12 | ((d) & 0xfff))) +#define s390_lh(code, r, b, d) s390_emit32 (code, (72 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_lhi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 8 << 16 | ((v) & 0xffff))) +#define s390_ic(code, r, b, d) s390_emit32 (code, (67 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_st(code, r, b, d) s390_emit32 (code, (80 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_stm(code, r1, r2, b, d) s390_emit32 (code, (144 << 24 | (r1) << 20 | (r2) << 16 \ + | (b) << 12 | ((d) & 0xfff))) +#define s390_sth(code, r, b, d) s390_emit32 (code, (64 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_stc(code, r, b, d) s390_emit32 (code, (66 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_la(code, r, b, d) s390_emit32 (code, (65 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_ld(code, f, b, d) s390_emit32 (code, (104 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_le(code, f, b, d) s390_emit32 (code, (120 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_std(code, f, b, d) s390_emit32 (code, (96 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_ste(code, f, b, d) s390_emit32 (code, (112 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_mvc(c, l, b1, d1, b2, d2) s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ + (b1) << 12 | ((d1) & 0xfff))); \ + s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff))) +#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))); + +#endif diff --git a/s390/tramp.c b/s390/tramp.c new file mode 100644 index 0000000..d8ecbb5 --- /dev/null +++ b/s390/tramp.c @@ -0,0 +1,1223 @@ +/*------------------------------------------------------------------*/ +/* */ +/* Name - tramp.c */ +/* */ +/* Function - Create trampolines to invoke arbitrary functions. */ +/* */ +/* Name - Neale Ferguson. */ +/* */ +/* Date - October, 2002 */ +/* */ +/* */ +/*------------------------------------------------------------------*/ + +/*------------------------------------------------------------------*/ +/* D e f i n e s */ +/*------------------------------------------------------------------*/ + +#define PROLOG_INS 24 /* Size of emitted prolog */ +#define CALL_INS 4 /* Size of emitted call */ +#define EPILOG_INS 18 /* Size of emitted epilog */ +#define MIN_STACK_SIZE 96 /* Basic size of S/390 stack frame */ +#define FLOAT_REGS 2 /* No. float registers for parms */ +#define GENERAL_REGS 5 /* No. 
general registers for parms */ + +#define ARG_BASE s390_r10 /* Register for addressing arguments*/ +#define STK_BASE s390_r15 /* Register for addressing stack */ +#define STKARG \ + (i*(sizeof(stackval))) /* Displacement of ith argument */ + +#define MINV_POS 96 /* MonoInvocation stack offset */ +#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) +#define OBJ_POS 8 +#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) + +#define DEBUG(x) + +#define MIN_CACHE_LINE 256 + +/*------------------------------------------------------------------*/ +/* Sequence to add an int/long long to parameters to stack_from_data*/ +/*------------------------------------------------------------------*/ +#define ADD_ISTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_la (p, s390_r4, STK_BASE, \ + sz.stack_size + 96 + stack_param * sizeof(long)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a float/double to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_RSTACK_PARM(i) \ + if (fpr_param < FLOAT_REGS) { \ + s390_la (p, s390_r4, STK_BASE, \ + float_pos + (fpr_param * sizeof(float) * (i))); \ + fpr_param++; \ + } else { \ + stack_param += (stack_param % (i)); \ + s390_la (p, s390_r4, STK_BASE, \ + sz.stack_size + 96 + stack_param * sizeof(float) * (i)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a structure ptr to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_TSTACK_PARM \ + if (reg_param < GENERAL_REGS) { \ + s390_l (p, s390_r4, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param++; \ + } else { \ + s390_l (p, s390_r4, STK_BASE, \ + sz.stack_size + 96 + stack_param * sizeof(long)); \ + stack_param++; \ + } + +#define ADD_PSTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_l (p, s390_r4, STK_BASE, \ + sz.stack_size + 96 + stack_param * sizeof(long)); \ + stack_param++; \ + } + +/*========================= End of Defines =========================*/ + +/*------------------------------------------------------------------*/ +/* I n c l u d e s */ +/*------------------------------------------------------------------*/ + +#ifdef NEED_MPROTECT +# include +# include /* for PAGESIZE */ +# ifndef PAGESIZE +# define PAGESIZE 4096 +# endif +#endif + +#include "config.h" +#include +#include +#include "s390-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" + +/*========================= End of Includes ========================*/ + +/*------------------------------------------------------------------*/ +/* T y p e d e f s */ +/*------------------------------------------------------------------*/ + +/*------------------------------------------------------------------*/ +/* Structure used to accummulate size of stack, code, and locals */ +/*------------------------------------------------------------------*/ +typedef struct { + guint stack_size, + local_size, + code_size, + retStruct; +} 
size_data; + +/*========================= End of Typedefs ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - add_general */ +/* */ +/* Function - Determine code and stack size incremements for a */ +/* parameter. */ +/* */ +/*------------------------------------------------------------------*/ + +static void inline +add_general (guint *gr, size_data *sz, gboolean simple) +{ + if (simple) { + if (*gr >= GENERAL_REGS) { + sz->stack_size += sizeof(long); + sz->code_size += 12; + } else { + sz->code_size += 8; + } + } else { + if (*gr >= GENERAL_REGS - 1) { + sz->stack_size += 8 + (sz->stack_size % 8); + sz->code_size += 10; + } else { + sz->code_size += 8; + } + (*gr) ++; + } + (*gr) ++; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - calculate_sizes */ +/* */ +/* Function - Determine the amount of space required for code */ +/* and stack. In addition determine starting points */ +/* for stack-based parameters, and area for struct- */ +/* ures being returned on the stack. */ +/* */ +/*------------------------------------------------------------------*/ + +static void inline +calculate_sizes (MonoMethodSignature *sig, size_data *sz, + gboolean string_ctor) +{ + guint i, fr, gr, size; + guint32 simpletype, align; + + fr = 0; + gr = 2; + sz->retStruct = 0; + sz->stack_size = MIN_STACK_SIZE; + sz->code_size = (PROLOG_INS + CALL_INS + EPILOG_INS); + sz->local_size = 0; + + if (sig->hasthis) { + add_general (&gr, sz, TRUE); + } + + /*----------------------------------------------------------*/ + /* We determine the size of the return code/stack in case we*/ + /* need to reserve a register to be used to address a stack */ + /* area that the callee will use. */ + /*----------------------------------------------------------*/ + + if (sig->ret->byref || string_ctor) { + sz->code_size += 8; + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + sz->code_size += 4; + break; + case MONO_TYPE_I8: + sz->code_size += 4; + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + gr++; + if (sig->pinvoke) + size = mono_class_native_size (sig->ret->data.klass, &align); + else + size = mono_class_value_size (sig->ret->data.klass, &align); + if (align > 1) + sz->code_size += 10; + switch (size) { + /*----------------------------------*/ + /* On S/390, structures of size 1, */ + /* 2, 4, and 8 bytes are returned */ + /* in (a) register(s). 
*/ + /*----------------------------------*/ + case 1: + case 2: + case 4: + case 8: + sz->code_size += 16; + sz->stack_size += 4; + break; + default: + sz->retStruct = 1; + sz->code_size += 32; + } + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + + /*----------------------------------------------------------*/ + /* We determine the size of the parameter code and stack */ + /* requirements by checking the types and sizes of the */ + /* parameters. */ + /*----------------------------------------------------------*/ + + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + add_general (&gr, sz, TRUE); + continue; + } + simpletype = sig->params [i]->type; + enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + add_general (&gr, sz, TRUE); + break; + case MONO_TYPE_SZARRAY: + add_general (&gr, sz, TRUE); + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (sig->pinvoke) + size = mono_class_native_size (sig->params [i]->data.klass, &align); + else + size = mono_class_value_size (sig->params [i]->data.klass, &align); + DEBUG(printf("%d typesize: %d (%d)\n",i,size,align)); + switch (size) { + /*----------------------------------*/ + /* On S/390, structures of size 1, */ + /* 2, 4, and 8 bytes are passed in */ + /* (a) register(s). */ + /*----------------------------------*/ + case 0: + case 1: + case 2: + case 4: + add_general(&gr, sz, TRUE); + break; + case 8: + add_general(&gr, sz, FALSE); + break; + default: + sz->local_size += (size + (size % align)); + sz->code_size += 40; + } + break; + case MONO_TYPE_I8: + add_general (&gr, sz, FALSE); + break; + case MONO_TYPE_R4: + if (fr < FLOAT_REGS) { + sz->code_size += 4; + fr++; + } + else { + sz->code_size += 4; + sz->stack_size += 8; + } + break; + case MONO_TYPE_R8: + if (fr < FLOAT_REGS) { + sz->code_size += 4; + fr++; + } else { + sz->code_size += 4; + sz->stack_size += 8 + (sz->stack_size % 8); + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + + /* align stack size to 8 */ + DEBUG (printf (" stack size: %d (%d)\n" + " code size: %d\n" + " local size: %d\n", + (sz->stack_size + 8) & ~8, sz->stack_size, + (sz->code_size),(sz->local_size + 8) & ~8)); + sz->stack_size = (sz->stack_size + 8) & ~8; + sz->local_size = (sz->local_size + 8) & ~8; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_prolog */ +/* */ +/* Function - Create the instructions that implement the stand- */ +/* ard function prolog according to the S/390 ABI. 
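      (added note: in the 31-bit S/390 linkage r15 is the stack
      pointer and every frame begins with a 96-byte register save
      area; the STM of r6-r15 at offset 24 preserves the call-saved
      registers, and after r15 is lowered the old value is stored at
      offset 0 of the new frame as the back chain)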
*/ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz) +{ + guint stack_size; + + stack_size = sz->stack_size + sz->local_size; + + /* function prolog */ + s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24); + s390_l (p, s390_r7, STK_BASE, 96); + s390_lr (p, s390_r11, STK_BASE); + s390_ahi (p, STK_BASE, -stack_size); + s390_st (p, s390_r11, STK_BASE, 0); + + /*-----------------------------------------*/ + /* Save: */ + /* - address of "callme" */ + /* - address of "retval" */ + /* - address of "arguments" */ + /*-----------------------------------------*/ + s390_lr (p, s390_r9, s390_r2); + s390_lr (p, s390_r8, s390_r3); + s390_lr (p, s390_r10, s390_r5); + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_save_parameters */ +/* */ +/* Function - Create the instructions that load registers with */ +/* parameters, place others on the stack according */ +/* to the S/390 ABI. */ +/* */ +/* The resulting function takes the form: */ +/* void func (void (*callme)(), void *retval, */ +/* void *this_obj, stackval *arguments); */ +/* */ +/*------------------------------------------------------------------*/ + +inline static guint8* +emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) +{ + guint i, fr, gr, act_strs, align, + stack_par_pos, size, local_pos; + guint32 simpletype; + + /*----------------------------------------------------------*/ + /* If a structure on stack is being returned, reserve r2 */ + /* to point to an area where it can be passed. */ + /*----------------------------------------------------------*/ + if (sz->retStruct) + gr = 1; + else + gr = 0; + fr = 0; + act_strs = 0; + stack_par_pos = MIN_STACK_SIZE; + local_pos = sz->stack_size; + + if (sig->hasthis) { + s390_lr (p, s390_r2 + gr, s390_r4); + gr++; + } + + act_strs = 0; + for (i = 0; i < sig->param_count; ++i) { + DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref)); + if (sig->params [i]->byref) { + if (gr < GENERAL_REGS) { + s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + gr ++; + } else { + s390_l (p, s390_r0, ARG_BASE, STKARG); + s390_st (p, s390_r0, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + continue; + } + simpletype = sig->params [i]->type; + enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + if (gr < GENERAL_REGS) { + s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + gr ++; + } else { + s390_l (p, s390_r0, ARG_BASE, STKARG); + s390_st (p, s390_r0, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + if (sig->pinvoke) + size = mono_class_native_size (sig->params [i]->data.klass, &align); + else + size = mono_class_value_size (sig->params [i]->data.klass, &align); + DEBUG(printf("parStruct - size %d pinvoke: %d\n",size,sig->pinvoke)); + switch (size) { + case 0: + case 1: + case 2: + case 4: + if 
(gr < GENERAL_REGS) { + s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + s390_l (p, s390_r2 + gr, s390_r2 + gr, 0); + gr++; + } else { + stack_par_pos += (stack_par_pos % align); + s390_l (p, s390_r10, ARG_BASE, STKARG); + s390_l (p, s390_r10, s390_r10, 0); + s390_st (p, s390_r10, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + break; + case 8: + if (gr < GENERAL_REGS-1) { + s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + s390_lm (p, s390_r2 + gr, s390_r3 + gr, s390_r2 + gr, 0); + } else { + stack_par_pos += (stack_par_pos % align); + s390_l (p, s390_r10, ARG_BASE, STKARG); + s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0); + stack_par_pos += sizeof(long long); + } + break; + default: + if (size <= 256) { + local_pos += (local_pos % align); + s390_l (p, s390_r13, ARG_BASE, STKARG); + s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0); + s390_la (p, s390_r13, STK_BASE, local_pos); + local_pos += size; + } else { + local_pos += (local_pos % align); + s390_bras (p, s390_r13, 4); + s390_word (p, size); + s390_l (p, s390_r1, s390_r13, 0); + s390_l (p, s390_r0, ARG_BASE, STKARG); + s390_lr (p, s390_r14, s390_r12); + s390_la (p, s390_r12, STK_BASE, local_pos); + s390_lr (p, s390_r13, s390_r1); + s390_mvcl (p, s390_r12, s390_r0); + s390_lr (p, s390_r12, s390_r14); + s390_la (p, s390_r13, STK_BASE, local_pos); + local_pos += size; + } + if (gr < GENERAL_REGS) { + s390_lr (p, s390_r2 + gr, s390_r13); + gr++; + } else { + s390_st (p, s390_r13, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + } + break; + case MONO_TYPE_I8: + if (gr < GENERAL_REGS-1) { + s390_lm (p, s390_r2 + gr, s390_r2 + gr + 1, ARG_BASE, STKARG); + gr += 2; + } else { + *(guint32 *) p += 7; + *(guint32 *) p &= ~7; + s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); + } + break; + case MONO_TYPE_R4: + if (fr < FLOAT_REGS) { + s390_le (p, s390_r0 + fr, ARG_BASE, STKARG); + fr++; + } else { + s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(float); + } + break; + case MONO_TYPE_R8: + if (fr < FLOAT_REGS) { + s390_ld (p, s390_r0 + fr, ARG_BASE, STKARG); + fr++; + } else { + *(guint32 *) p += 7; + *(guint32 *) p &= ~7; + s390_mvc (p, sizeof(double), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + /*----------------------------------------------------------*/ + /* If we're returning a structure but not in a register */ + /* then point the result area for the called routine */ + /*----------------------------------------------------------*/ + if (sz->retStruct) { + s390_l (p, s390_r2, s390_r8, 0); + } + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - alloc_code_memory */ +/* */ +/* Function - Allocate space to place the emitted code. 
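      (added note: plain g_malloc memory is not guaranteed to be
      executable; when NEED_MPROTECT is defined the buffer is over-
      allocated by almost a page and rounded up to a PAGESIZE
      boundary so the later mprotect call in mono_create_trampoline
      can mark the region PROT_EXEC)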
*/ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +alloc_code_memory (guint code_size) +{ + guint8 *p; + +#ifdef NEED_MPROTECT + p = g_malloc (code_size + PAGESIZE - 1); + + /* Align to a multiple of PAGESIZE, assumed to be a power of two */ + p = (char *)(((int) p + PAGESIZE-1) & ~(PAGESIZE-1)); +#else + p = g_malloc (code_size); +#endif + DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4)); + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_call_and_store_retval */ +/* */ +/* Function - Emit code that will implement the call to the */ +/* desired function, and unload the result according */ +/* to the S390 ABI for the type of value returned */ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, + size_data *sz, gboolean string_ctor) +{ + guint32 simpletype; + guint retSize, align; + + /* call "callme" */ + s390_basr (p, s390_r14, s390_r9); + + /* get return value */ + if (sig->ret->byref || string_ctor) { + s390_st (p, s390_r2, s390_r8, 0); + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + s390_stc (p, s390_r2, s390_r8, 0); + break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + s390_sth (p, s390_r2, s390_r8, 0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + s390_st (p, s390_r2, s390_r8, 0); + break; + case MONO_TYPE_R4: + s390_ste (p, s390_f0, s390_r8, 0); + break; + case MONO_TYPE_R8: + s390_std (p, s390_f0, s390_r8, 0); + break; + case MONO_TYPE_I8: + s390_stm (p, s390_r2, s390_r3, s390_r8, 0); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + if (sig->pinvoke) + retSize = mono_class_native_size (sig->ret->data.klass, &align); + else + retSize = mono_class_value_size (sig->ret->data.klass, &align); +printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); + switch(retSize) { + case 0: + break; + case 1: + s390_stc (p, s390_r2, s390_r8, 0); + break; + case 2: + s390_sth (p, s390_r2, s390_r8, 0); + break; + case 4: + s390_st (p, s390_r2, s390_r8, 0); + break; + case 8: + s390_stm (p, s390_r2, s390_r3, s390_r8, 0); + break; + default: + /*------------------------------------------*/ + /* The callee has already placed the result */ + /* in the required area */ + /*------------------------------------------*/ + } + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", + sig->ret->type); + } + } + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_epilog */ +/* */ +/* Function - Create the instructions that implement the stand- */ +/* ard function epilog according to the S/390 ABI. 
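      (added note: the epilog reloads the caller's r15 from the back
      chain at 0(r15), fetches the saved return address into r4 --
      r14 sits 32 bytes into the r6-r15 save area, hence offset 56 --
      restores r6-r15 with LM, and branches to r4)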
*/
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ /* function epilog */
+ s390_l (p, STK_BASE, STK_BASE, 0);
+ s390_l (p, s390_r4, STK_BASE, 56);
+ s390_lm (p, s390_r6, STK_BASE, STK_BASE, 24);
+ s390_br (p, s390_r4);
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_create_trampoline */
+/* */
+/* Function - Create the code that will allow a mono method to */
+/* invoke a system subroutine. */
+/* */
+/*------------------------------------------------------------------*/
+
+MonoPIFunc
+mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ guint8 *p, *code_buffer;
+ size_data sz;
+
+ DEBUG (printf ("\nPInvoke [start emitting]\n"));
+ calculate_sizes (sig, &sz, string_ctor);
+
+ p = code_buffer = alloc_code_memory (sz.code_size);
+ p = emit_prolog (p, sig, &sz);
+ p = emit_save_parameters (p, sig, &sz);
+ p = emit_call_and_store_retval (p, sig, &sz, string_ctor);
+ p = emit_epilog (p, sig, &sz);
+
+#ifdef NEED_MPROTECT
+ if (mprotect (code_buffer, sz.code_size, PROT_READ | PROT_WRITE | PROT_EXEC)) {
+ g_error ("Cannot mprotect trampoline\n");
+ }
+#endif
+
+ DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
+
+ DEBUG (printf ("PInvoke [end emitting]\n"));
+
+ return (MonoPIFunc) code_buffer;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_create_method_pointer */
+/* */
+/* Function - Returns a pointer to a native function that can */
+/* be used to call the specified method. */
+/* */
+/* The function created will receive the arguments */
+/* according to the calling convention specified in */
+/* the method. */
+/* */
+/* This function works by creating a MonoInvocation */
+/* structure, filling the fields in and calling */
+/* ves_exec_method() on it. */
+/* */
+/* Logic: */
+/* ------ */
+/* mono_create_method_pointer (MonoMethod *method) */
+/* create the unmanaged->managed wrapper */
+/* register it with mono_jit_info_table_add() */
+/* */
+/* What does the unmanaged->managed wrapper do? */
+/* allocate a MonoInvocation structure (inv) on the stack */
+/* allocate an array of stackval on the stack with length = */
+/* method->signature->param_count + 1 [call it stack_args] */
+/* set inv->ex, inv->ex_handler, inv->child, inv->parent to */
+/* NULL */
+/* set inv->method to method */
+/* if method is an instance method, set inv->obj to the */
+/* 'this' argument (the first argument), else set it to */
+/* NULL */
+/* for each argument to the method call: */
+/* stackval_from_data (sig->params[i], &stack_args[i], */
+/* arg, sig->pinvoke); */
+/* Where: */
+/* ------ */
+/* sig - is method->signature */
+/* &stack_args[i] - is the pointer to the ith element */
+/* in the stackval array */
+/* arg - is a pointer to the argument re- */
+/* ceived by the function according */
+/* to the call convention. If it */
+/* gets passed in a register, save */
+/* it on the stack first.
*/
+/* */
+/* set inv->retval to the address of the last element of */
+/* stack_args [recall we allocated param_count+1 of them] */
+/* call ves_exec_method(inv) */
+/* copy the returned value from inv->retval to where the */
+/* calling convention expects to find it on return from */
+/* the wrapper [if it's a structure, use stackval_to_data] */
+/* */
+/*------------------------------------------------------------------*/
+
+void *
+mono_create_method_pointer (MonoMethod *method)
+{
+ MonoMethodSignature *sig;
+ MonoJitInfo *ji;
+ guint8 *p, *code_buffer;
+ guint i, align = 0, simple_type, retSize, reg_save = 0,
+ stackval_arg_pos, local_pos, float_pos,
+ local_start, reg_param = 0, stack_param,
+ this_flag, arg_pos, fpr_param, parSize;
+ guint32 simpletype;
+ size_data sz;
+ int *vtbuf, cpos, vt_cur;
+
+ sz.code_size = 1024;
+ sz.stack_size = 1024;
+ stack_param = 0;
+ fpr_param = 0;
+ arg_pos = 0;
+
+ sig = method->signature;
+
+ p = code_buffer = g_malloc (sz.code_size);
+
+ DEBUG (printf ("\nDelegate [start emitting] %s at %p\n",
+ method->name, p));
+
+ /*----------------------------------------------------------*/
+ /* prolog */
+ /*----------------------------------------------------------*/
+ s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24);
+ s390_l (p, s390_r7, STK_BASE, 96);
+ s390_lr (p, s390_r0, STK_BASE);
+ s390_ahi (p, STK_BASE, -(sz.stack_size+96));
+ s390_st (p, s390_r0, STK_BASE, 0);
+ s390_la (p, s390_r8, STK_BASE, 4);
+ s390_lr (p, s390_r10, s390_r8);
+ s390_lhi (p, s390_r9, sz.stack_size+92);
+ s390_lhi (p, s390_r11, 0);
+ s390_mvcl(p, s390_r8, s390_r10);
+
+ /*----------------------------------------------------------*/
+ /* Let's fill MonoInvocation - first zero some fields */
+ /*----------------------------------------------------------*/
+ s390_lhi (p, s390_r0, 0);
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)));
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
+ s390_lhi (p, s390_r0, 1);
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap)));
+
+ /*----------------------------------------------------------*/
+ /* set method pointer */
+ /*----------------------------------------------------------*/
+ s390_bras (p, s390_r13, 4);
+ s390_word (p, method);
+ s390_l (p, s390_r0, s390_r13, 0);
+ s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
+
+ local_start = local_pos = MINV_POS +
+ sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval);
+ this_flag = (sig->hasthis ? 1 : 0);
+
+ /*----------------------------------------------------------*/
+ /* if we are returning a structure, check its length to */
+ /* see if there's a "hidden" parameter that points to the */
+ /* area.
If necessary save this hidden parameter for later */ + /*----------------------------------------------------------*/ + if (MONO_TYPE_ISSTRUCT(sig->ret)) { + if (sig->pinvoke) + retSize = mono_class_native_size (sig->ret->data.klass, &align); + else + retSize = mono_class_value_size (sig->ret->data.klass, &align); + switch(retSize) { + case 0: + case 1: + case 2: + case 4: + case 8: + sz.retStruct = 0; + break; + default: + sz.retStruct = 1; + s390_lr(p, s390_r8, s390_r2); + reg_save = 1; + } + } else { + reg_save = 0; + } + + if (this_flag) { + s390_st (p, s390_r2 + reg_save, STK_BASE, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + reg_param++; + } else { + s390_st (p, s390_r2 + reg_save, STK_BASE, local_pos); + local_pos += sizeof(int); + s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + } + + s390_stm (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos); + local_pos += 4 * sizeof(long); + float_pos = local_pos; + s390_std (p, s390_f0, STK_BASE, local_pos); + local_pos += sizeof(double); + s390_std (p, s390_f2, STK_BASE, local_pos); + local_pos += sizeof(double); + + /*----------------------------------------------------------*/ + /* prepare space for valuetypes */ + /*----------------------------------------------------------*/ + vt_cur = local_pos; + vtbuf = alloca (sizeof(int)*sig->param_count); + cpos = 0; + for (i = 0; i < sig->param_count; i++) { + MonoType *type = sig->params [i]; + vtbuf [i] = -1; + DEBUG(printf("par: %d type: %d ref: %d\n",i,type->type,type->byref)); + if (type->type == MONO_TYPE_VALUETYPE) { + MonoClass *klass = type->data.klass; + gint size; + + if (klass->enumtype) + continue; + size = mono_class_native_size (klass, &align); + cpos += align - 1; + cpos &= ~(align - 1); + vtbuf [i] = cpos; + cpos += size; + } + } + cpos += 3; + cpos &= ~3; + + local_pos += cpos; + + /*----------------------------------------------------------*/ + /* set MonoInvocation::stack_args */ + /*----------------------------------------------------------*/ + stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); + s390_la (p, s390_r0, STK_BASE, stackval_arg_pos); + s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); + + /*----------------------------------------------------------*/ + /* add stackval arguments */ + /*----------------------------------------------------------*/ + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + ADD_ISTACK_PARM(0, 1); + } else { + simple_type = sig->params [i]->type; + enum_savechk: + switch (simple_type) { + case MONO_TYPE_I8: + ADD_ISTACK_PARM(-1, 2); + break; + case MONO_TYPE_R4: + ADD_RSTACK_PARM(1); + break; + case MONO_TYPE_R8: + ADD_RSTACK_PARM(2); + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simple_type = sig->params [i]->data.klass->enum_basetype->type; + goto enum_savechk; + } + if (sig->pinvoke) + parSize = mono_class_native_size (sig->params [i]->data.klass, &align); + else + parSize = mono_class_value_size (sig->params [i]->data.klass, &align); + switch(parSize) { + case 0: + case 1: + case 2: + case 4: + ADD_PSTACK_PARM(0, 1); + break; + case 8: + ADD_PSTACK_PARM(-1, 2); + break; + default: + ADD_TSTACK_PARM; + } + break; + default: + ADD_ISTACK_PARM(0, 1); + } + } + + if (vtbuf [i] >= 0) { + s390_la (p, s390_r3, STK_BASE, vt_cur); + s390_st (p, s390_r3, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r3, STK_BASE, stackval_arg_pos); + vt_cur += vtbuf [i]; + } else { + s390_la (p, s390_r3, 
STK_BASE, stackval_arg_pos); + } + + /*--------------------------------------*/ + /* Load the parameter registers for the */ + /* call to stackval_from_data */ + /*--------------------------------------*/ + s390_bras (p, s390_r13, 8); + s390_word (p, sig->params [i]); + s390_word (p, sig->pinvoke); + s390_word (p, stackval_from_data); + s390_l (p, s390_r2, s390_r13, 0); + + s390_l (p, s390_r5, s390_r13, 4); + + s390_l (p, s390_r9, s390_r13, 8); + s390_basr (p, s390_r14, s390_r9); + + stackval_arg_pos += sizeof(stackval); + + /* fixme: alignment */ + DEBUG (printf ("arg_pos %d --> ", arg_pos)); + if (sig->pinvoke) + arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + arg_pos += mono_type_stack_size (sig->params [i], &align); + + DEBUG (printf ("%d\n", stackval_arg_pos)); + } + + /*----------------------------------------------------------*/ + /* Set return area pointer. */ + /*----------------------------------------------------------*/ + s390_la (p, s390_r10, STK_BASE, stackval_arg_pos); + s390_st (p, s390_r10, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + s390_la (p, s390_r9, s390_r10, sizeof(stackval)); + s390_st (p, s390_r9, STK_BASE, stackval_arg_pos); + stackval_arg_pos += sizeof(stackval); + } + } + + /*----------------------------------------------------------*/ + /* call ves_exec_method */ + /*----------------------------------------------------------*/ + s390_bras (p, s390_r13, 4); + s390_word (p, ves_exec_method); + s390_l (p, s390_r9, s390_r13, 0); + s390_la (p, s390_r2, STK_BASE, MINV_POS); + s390_basr (p, s390_r14, s390_r9); + + /*----------------------------------------------------------*/ + /* move retval from stackval to proper place (r3/r4/...) 
*/ + /*----------------------------------------------------------*/ + DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref)); + if (sig->ret->byref) { + DEBUG (printf ("ret by ref\n")); + s390_st (p, s390_r2, s390_r10, 0); + } else { + enum_retvalue: +DEBUG(printf("Returns: %d\n",sig->ret->type)); + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_U1: + s390_lhi (p, s390_r2, 0); + s390_ic (p, s390_r2, s390_r10, 0); + break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + s390_lh (p, s390_r2, s390_r10, 0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + s390_l (p, s390_r2, s390_r10, 0); + break; + case MONO_TYPE_I8: + s390_lm (p, s390_r2, s390_r3, s390_r10, 0); + break; + case MONO_TYPE_R4: + s390_le (p, s390_f0, s390_r10, 0); + break; + case MONO_TYPE_R8: + s390_ld (p, s390_f0, s390_r10, 0); + break; + case MONO_TYPE_VALUETYPE: +DEBUG(printf("Returning Structure %d\n",sig->pinvoke)); +DEBUG(printf("Size: %d (%d)\n",retSize,align)); + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + /*---------------------------------*/ + /* Call stackval_to_data to return */ + /* the structure */ + /*---------------------------------*/ + s390_bras (p, s390_r13, 8); + s390_word (p, sig->ret); + s390_word (p, sig->pinvoke); + s390_word (p, stackval_to_data); + s390_l (p, s390_r2, s390_r13, 0); + s390_l (p, s390_r3, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); +DEBUG(printf("====> %08X\n",p)); + if (sz.retStruct) { + /*------------------------------------------*/ + /* Get stackval_to_data to set result area */ + /*------------------------------------------*/ + s390_lr (p, s390_r4, s390_r8); + } else { + /*------------------------------------------*/ + /* Give stackval_to_data a temp result area */ + /*------------------------------------------*/ + s390_la (p, s390_r4, STK_BASE, stackval_arg_pos); + } + s390_l (p, s390_r5, s390_r13, 4); + s390_l (p, s390_r9, s390_r13, 8); + s390_basr (p, s390_r14, s390_r9); + switch (retSize) { + case 0: + break; + case 1: + s390_lhi (p, s390_r2, 0); + s390_ic (p, s390_r2, s390_r10, 0); + break; + case 2: + s390_lh (p, s390_r2, s390_r10, 0); + break; + case 4: + s390_l (p, s390_r2, s390_r10, 0); + break; + case 8: + s390_lm (p, s390_r2, s390_r3, s390_r10, 0); + break; + default: + /*-------------------------------------------------*/ + /* stackval_to_data has placed data in result area */ + /*-------------------------------------------------*/ + } + break; + default: + g_error ("Type 0x%x not handled yet in thunk creation", + sig->ret->type); + break; + } + } + + /*----------------------------------------------------------*/ + /* epilog */ + /*----------------------------------------------------------*/ + s390_l (p, STK_BASE, STK_BASE, 0); + s390_l (p, s390_r4, STK_BASE, 56); + s390_lm (p, s390_r6, STK_BASE, STK_BASE, 24); + s390_br (p, s390_r4); + + DEBUG (printf ("emited code size: %d\n", p - code_buffer)); + + DEBUG (printf ("Delegate [end emiting]\n")); + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = code_buffer; + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; +} + +/*========================= End of Function ========================*/ -- cgit v1.1 From d4f44103ed442b9a6e221b58b68550c1de4dfa2b Mon Sep 17 
00:00:00 2001 From: Mark Crichton Date: Mon, 11 Nov 2002 19:13:08 +0000 Subject: Some debugging stubs. svn path=/trunk/mono/; revision=8922 --- ChangeLog | 4 ++++ sparc/tramp.c | 57 ++++++++++++++++++++++++++++++++------------------------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/ChangeLog b/ChangeLog index 516531f..e5944b6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2002-11-11 Mark Crichton + + * sparc/tramp.c: Added some disassembly bits for debugging. + 2002-10-02 Mark Crichton * sparc/tramp.c: More cleanup of the trampoline code. Still some diff --git a/sparc/tramp.c b/sparc/tramp.c index 94e6a5f..e9c6a24 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -59,6 +59,26 @@ sig_to_name (MonoMethodSignature *sig, const char *prefix) } static void +sparc_disassemble_code (guint32 *code_buffer, guint32 *p, char *id) +{ + guchar *cp; + FILE *ofd; + + if (!(ofd = fopen ("/tmp/test.s", "w"))) + g_assert_not_reached(); + + fprintf (ofd, "%s:\n", id); + + for (cp = code_buffer; cp < p; cp++) + fprintf (ofd, ".byte %d\n", *cp); + + fclose (ofd); + + system ("as /tmp/test.s -o /tmp/test.o;objdump -d /tmp/test.o"); +} + + +static void add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) { if (simple) { @@ -127,7 +147,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } - size = mono_class_value_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (sig->params[i]->data.klass, NULL); if (size != 4) { fprintf(stderr, "copy %d byte struct on stack\n", size); *use_memcpy = TRUE; @@ -205,7 +225,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } } - *stack_size = (*stack_size + 15) & (~15); + *stack_size = (*stack_size + 7) & (~7); } static inline guint32 * @@ -279,16 +299,16 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, !sig->params[i]->data.klass->enumtype) { gint size; - size = mono_class_value_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (sig->params[i]->data.klass, NULL); if (size != 4) { /* need to call memcpy here */ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); sparc_ld_imm (p, sparc_i3, i*16, sparc_o1); sparc_set (p, (guint32)size, sparc_o2); - sparc_set (p, (guint32)memcpy, sparc_l0); + sparc_set (p, (guint32)memmove, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); - stack_par_pos += (size*2 + 3) & (~3); + stack_par_pos += (size + 3) & (~3); } } } @@ -345,7 +365,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } - size = mono_class_value_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (sig->params[i]->data.klass, NULL); if (size == 4) { SAVE_4_VAL_IN_GENERIC_REGISTER; } else { @@ -491,15 +511,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); p = emit_epilog (p, sig, stack_size); -#if 0 - { - guchar *cp; - fprintf (stderr,".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); - for (cp = code_buffer; cp < p; cp++) { - fprintf (stderr, ".byte 0x%x\n", *cp); - } - } -#endif + //sparc_disassemble_code (code_buffer, p, sig_to_name(sig, NULL)); /* So here's the deal... 
* UltraSPARC will flush a whole cache line at a time @@ -547,6 +559,7 @@ mono_create_method_pointer (MonoMethod *method) p = code_buffer = g_malloc (code_size); fprintf(stderr, "Delegate [start emiting] %s\n", method->name); + fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)); p = emit_prolog (p, sig, stack_size); @@ -637,7 +650,7 @@ mono_create_method_pointer (MonoMethod *method) sparc_st_imm (p, sparc_o1, sparc_sp, stackval_arg_pos); sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_o1); - sparc_ld (p, sparc_o2, 0, sparc_o2); + //sparc_ld (p, sparc_o2, 0, sparc_o2); vt_cur += vtbuf[i]; } else { sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, @@ -737,15 +750,9 @@ mono_create_method_pointer (MonoMethod *method) mono_jit_info_table_add (mono_root_domain, ji); -#if 0 - { - guchar *cp; - fprintf (stderr,".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); - for (cp = code_buffer; cp < p; cp++) { - fprintf (stderr, ".byte 0x%x\n", *cp); - } - } -#endif + sparc_disassemble_code (code_buffer, p, method->name); + + fprintf(stderr, "Delegate [end emiting] %s\n", method->name); return ji->code_start; } -- cgit v1.1 From 6d1b716753c1cc8a2f5c26338020941aa58ce9d7 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 15 Jan 2003 15:21:26 +0000 Subject: Update to the API change of a while ago. svn path=/trunk/mono/; revision=10545 --- unknown.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unknown.c b/unknown.c index d02edca..cdb7a4a 100644 --- a/unknown.c +++ b/unknown.c @@ -2,7 +2,7 @@ #include "mono/interpreter/interp.h" MonoPIFunc -mono_create_trampoline (MonoMethod *method) +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { g_error ("Unsupported arch"); return NULL; -- cgit v1.1 From d2321af1b58b2fbb84c3b2cf3f6c7c7db0a787a4 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 17 Jan 2003 20:17:58 +0000 Subject: Fri Jan 17 21:14:18 CET 2003 Paolo Molaro * ppc/tramp.c: adapted to work for MacOSX (from a patch by John Duncan). svn path=/trunk/mono/; revision=10630 --- ChangeLog | 6 ++++ ppc/tramp.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 80 insertions(+), 18 deletions(-) diff --git a/ChangeLog b/ChangeLog index e5944b6..8669fb8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ + +Fri Jan 17 21:14:18 CET 2003 Paolo Molaro + + * ppc/tramp.c: adapted to work for MacOSX (from a patch by + John Duncan). + 2002-11-11 Mark Crichton * sparc/tramp.c: Added some disassembly bits for debugging. 
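The "disassembly bits" referenced in the ChangeLog entry above round-trip the raw emitter output through the system toolchain: sparc_disassemble_code() writes each byte of the code buffer as a .byte directive, assembles the file with as, and disassembles the object with objdump. A minimal standalone sketch of the same verification trick follows; the /tmp paths and the placeholder x86 bytes are illustrative assumptions, not part of the patch, and the cursor here is a plain unsigned char *, which sidesteps the guchar */guint32 * pointer mismatch in the sparc version.

#include <stdio.h>
#include <stdlib.h>

/* Write a raw code buffer as ".byte" directives, assemble it, and let
 * objdump produce a listing -- handy when an emitter has no listing mode. */
static int
disassemble_buffer (const unsigned char *buf, size_t len, const char *label)
{
	FILE *f = fopen ("/tmp/dis.s", "w");
	size_t i;

	if (!f)
		return -1;
	fprintf (f, "%s:\n", label);
	for (i = 0; i < len; i++)
		fprintf (f, ".byte %d\n", buf [i]);
	fclose (f);
	/* assemble, then disassemble the resulting object file */
	return system ("as /tmp/dis.s -o /tmp/dis.o && objdump -d /tmp/dis.o");
}

int
main (void)
{
	/* placeholder bytes: two x86 nops followed by a ret */
	unsigned char code [] = { 0x90, 0x90, 0xc3 };

	return disassemble_buffer (code, sizeof (code), "sample") ? 1 : 0;
}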
diff --git a/ppc/tramp.c b/ppc/tramp.c index d40660e..529d1b4 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -22,7 +22,7 @@ #endif #endif -#define DEBUG(x) x +#define DEBUG(x) /* gpointer fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments) @@ -64,9 +64,22 @@ flush_icache (guint8 *code, guint size) #define PROLOG_INS 8 #define CALL_INS 2 #define EPILOG_INS 6 -#define MINIMAL_STACK_SIZE 5 #define FLOAT_REGS 8 #define GENERAL_REGS 8 +#ifdef __APPLE__ +#define MINIMAL_STACK_SIZE 10 +#define ALWAYS_ON_STACK(s) s +#define FP_ALSO_IN_REG(s) s +#define RET_ADDR_OFFSET 8 +#define STACK_PARAM_OFFSET 24 +#else +#define MINIMAL_STACK_SIZE 5 +#define ALWAYS_ON_STACK(s) +#define FP_ALSO_IN_REG(s) s +#define ALIGN_DOUBLES +#define RET_ADDR_OFFSET 4 +#define STACK_PARAM_OFFSET 8 +#endif static void inline add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) @@ -76,17 +89,24 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) *stack_size += 4; *code_size += 8; /* load from stack, save on stack */ } else { + ALWAYS_ON_STACK (*stack_size += 4); *code_size += 4; /* load from stack */ } } else { if (*gr >= GENERAL_REGS - 1) { - *stack_size += 8 + (*stack_size % 8); + *stack_size += 8; +#ifdef ALIGN_DOUBLES + *stack_size += (*stack_size % 8); +#endif *code_size += 16; /* 2x load from stack, 2x save to stack */ } else { - *code_size += 16; /* 2x load from stack */ + ALWAYS_ON_STACK (*stack_size += 8); + *code_size += 8; /* 2x load from stack */ } - if ((*gr) && 1) +#ifdef ALIGN_DOUBLES + if ((*gr) & 1) (*gr) ++; +#endif (*gr) ++; } (*gr) ++; @@ -105,9 +125,11 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, if (sig->hasthis) { add_general (&gr, stack_size, code_size, TRUE); } - + DEBUG(printf("params: %d\n", sig->param_count)); for (i = 0; i < sig->param_count; ++i) { + DEBUG(printf("param %d: ", i)); if (sig->params [i]->byref) { + DEBUG(printf("byref\n")); add_general (&gr, stack_size, code_size, TRUE); continue; } @@ -163,10 +185,21 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, add_general (&gr, stack_size, code_size, FALSE); break; case MONO_TYPE_R4: + if (fr < 7) { + *code_size += 4; + fr ++; + FP_ALSO_IN_REG (gr ++); + ALWAYS_ON_STACK (*stack_size += 4); + } else { + NOT_IMPLEMENTED ("R4 arg"); + } + break; case MONO_TYPE_R8: if (fr < 7) { *code_size += 4; fr ++; + FP_ALSO_IN_REG (gr += 2); + ALWAYS_ON_STACK (*stack_size += 8); } else { NOT_IMPLEMENTED ("R8 arg"); } @@ -239,7 +272,7 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ ppc_mflr (p, ppc_r0); /* r0 <--- LR */ ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ - ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ + ppc_stw (p, ppc_r0, stack_size + RET_ADDR_OFFSET, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ return p; @@ -260,6 +293,7 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); \ gr ++; \ + ALWAYS_ON_STACK (stack_par_pos += 4); \ } else { \ ppc_lwz (p, ppc_r11, i*16, ARG_BASE); \ ppc_lwz (p, ppc_r11, 0, ppc_r11); \ @@ -274,7 +308,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint 
stack_size, gbo guint32 simpletype; fr = gr = 0; - stack_par_pos = 8; + stack_par_pos = STACK_PARAM_OFFSET; ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ @@ -295,6 +329,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gbo } else ppc_mr (p, ppc_r3, ppc_r5); gr ++; + ALWAYS_ON_STACK (stack_par_pos += 4); } if (use_memcpy) { @@ -341,6 +376,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gbo ppc_lwz (p, ppc_r3, stack_size - 12, ppc_r31); ppc_lwz (p, ppc_r3, 0, ppc_r3); gr ++; + ALWAYS_ON_STACK (stack_par_pos += 4); } else { NOT_IMPLEMENTED ("retval valuetype <= 8 bytes"); } @@ -395,21 +431,35 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gbo break; } case MONO_TYPE_I8: +DEBUG(printf("Mono_Type_i8. gr = %d, arg_base = %d\n", gr, ARG_BASE)); +#ifdef ALIGN_DOUBLES + if (gr & 1) + gr++; +#endif if (gr < 7) { - if (gr & 1) - gr ++; ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); - gr ++; - ppc_lwz (p, ppc_r3 + gr, i*16 + 4, ARG_BASE); - gr ++; - } else { - NOT_IMPLEMENTED ("i8 on stack"); + ppc_lwz (p, ppc_r3 + gr + 1, i*16 + 4, ARG_BASE); + ALWAYS_ON_STACK (stack_par_pos += 8); + } else if (gr == 7) { + ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); + ppc_lwz (p, ppc_r11, i*16 + 4, ARG_BASE); + ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); + stack_par_pos += 8; + } else { + ppc_lwz (p, ppc_r11, i*16, ARG_BASE); + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); + ppc_lwz (p, ppc_r11, i*16 + 4, ARG_BASE); + ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); + stack_par_pos += 8; } + gr += 2; break; case MONO_TYPE_R4: if (fr < 7) { ppc_lfs (p, ppc_f1 + fr, i*16, ARG_BASE); fr ++; + FP_ALSO_IN_REG (gr ++); + ALWAYS_ON_STACK (stack_par_pos += 4); } else { NOT_IMPLEMENTED ("r4 on stack"); } @@ -418,6 +468,8 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gbo if (fr < 7) { ppc_lfd (p, ppc_f1 + fr, i*16, ARG_BASE); fr ++; + FP_ALSO_IN_REG (gr += 2); + ALWAYS_ON_STACK (stack_par_pos += 8); } else { NOT_IMPLEMENTED ("r8 on stack"); } @@ -529,7 +581,7 @@ emit_epilog (guint8 *p, MonoMethodSignature *sig, guint stack_size) { /* function epilog */ ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ - ppc_lwz (p, ppc_r0, 4, ppc_r11); /* r0 <--- r11[4] load return address */ + ppc_lwz (p, ppc_r0, RET_ADDR_OFFSET, ppc_r11); /* r0 <--- r11[4] load return address */ ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */ ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */ ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ @@ -578,7 +630,11 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) } +#ifdef __APPLE__ +#define MINV_POS 24 /* MonoInvocation structure offset on stack */ +#else #define MINV_POS 8 /* MonoInvocation structure offset on stack */ +#endif #define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) #define OBJ_POS 8 #define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) @@ -632,7 +688,7 @@ mono_create_method_pointer (MonoMethod *method) ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ ppc_mflr (p, ppc_r0); /* r0 <--- LR */ ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ - ppc_stw (p, ppc_r0, stack_size + 4, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ + ppc_stw (p, ppc_r0, stack_size + RET_ADDR_OFFSET, ppc_r1); /* sp[-4] <--- LR save return 
address for "callme" */ ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ /* let's fill MonoInvocation */ @@ -801,7 +857,7 @@ mono_create_method_pointer (MonoMethod *method) /* epilog */ ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ - ppc_lwz (p, ppc_r0, 4, ppc_r11); /* r0 <--- r11[4] load return address */ + ppc_lwz (p, ppc_r0, RET_ADDR_OFFSET, ppc_r11); /* r0 <--- r11[4] load return address */ ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */ ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */ ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ -- cgit v1.1 From 898dd64bddf69974ae9a22d6aa0ce9625fc9a5a0 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 21 Jan 2003 16:33:33 +0000 Subject: Tue Jan 21 17:29:53 CET 2003 Paolo Molaro * ppc/ppc-codegen.h: completed ppc native code generation by Taylor Christopher P . svn path=/trunk/mono/; revision=10778 --- ChangeLog | 5 + ppc/ppc-codegen.h | 474 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 479 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8669fb8..d15d142 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Tue Jan 21 17:29:53 CET 2003 Paolo Molaro + + * ppc/ppc-codegen.h: completed ppc native code generation by + Taylor Christopher P . + Fri Jan 17 21:14:18 CET 2003 Paolo Molaro * ppc/tramp.c: adapted to work for MacOSX (from a patch by diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index c3f79b2..d3473ad 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -1,5 +1,7 @@ /* Copyright (C) 2001 Radek Doulik + + for testing do the following: ./test | as -o test.o */ #ifndef PPC_H @@ -111,5 +113,477 @@ typedef enum { #define ppc_stfs(c,S,d,a) ppc_emit32 (c, (52 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_stfd(c,S,d,a) ppc_emit32 (c, (54 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) +/*********************************************************************** +The macros below were tapped out by Christopher Taylor +from 18 November 2002 to 19 December 2002. + +Special thanks to rodo, lupus, dietmar, miguel, and duncan for patience, +and motivation. + +The macros found in this file are based on the assembler instructions found +in Motorola and Digital DNA's: + +"Programming Enviornments Manual For 32-bit Implementations of the PowerPC Architecture" + +MPCFPE32B/AD +12/2001 +REV2 + +see pages 326 - 524 for detailed information regarding each instruction + +Also see the "Ximian Copyright Agreement, 2002" for more information regarding +my and Ximian's copyright to this code. 
;)
+*************************************************************************/
+
+#define ppc_addx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (266 << 1) | Rc)
+#define ppc_add(c,D,A,B) ppc_addx(c,D,A,B,0,0)
+#define ppc_addd(c,D,A,B) ppc_addx(c,D,A,B,0,1)
+#define ppc_addo(c,D,A,B) ppc_addx(c,D,A,B,1,0)
+#define ppc_addod(c,D,A,B) ppc_addx(c,D,A,B,1,1)
+
+#define ppc_addcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (10 << 1) | Rc)
+#define ppc_addc(c,D,A,B) ppc_addcx(c,D,A,B,0,0)
+#define ppc_addcd(c,D,A,B) ppc_addcx(c,D,A,B,0,1)
+#define ppc_addco(c,D,A,B) ppc_addcx(c,D,A,B,1,0)
+#define ppc_addcod(c,D,A,B) ppc_addcx(c,D,A,B,1,1)
+
+#define ppc_addex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (138 << 1) | Rc)
+#define ppc_adde(c,D,A,B) ppc_addex(c,D,A,B,0,0)
+#define ppc_added(c,D,A,B) ppc_addex(c,D,A,B,0,1)
+#define ppc_addeo(c,D,A,B) ppc_addex(c,D,A,B,1,0)
+#define ppc_addeod(c,D,A,B) ppc_addex(c,D,A,B,1,1)
+
+#define ppc_addic(c,D,A,d) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_addicd(c,D,A,d) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+
+#define ppc_addmex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (234 << 1) | RC)
+#define ppc_addme(c,D,A) ppc_addmex(c,D,A,0,0)
+#define ppc_addmed(c,D,A) ppc_addmex(c,D,A,0,1)
+#define ppc_addmeo(c,D,A) ppc_addmex(c,D,A,1,0)
+#define ppc_addmeod(c,D,A) ppc_addmex(c,D,A,1,1)
+
+#define ppc_addzex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (202 << 1) | RC)
+#define ppc_addze(c,D,A) ppc_addzex(c,D,A,0,0)
+#define ppc_addzed(c,D,A) ppc_addzex(c,D,A,0,1)
+#define ppc_addzeo(c,D,A) ppc_addzex(c,D,A,1,0)
+#define ppc_addzeod(c,D,A) ppc_addzex(c,D,A,1,1)
+
+#define ppc_andx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (28 << 1) | RC)
+#define ppc_and(c,S,A,B) ppc_andx(c,S,A,B,0)
+#define ppc_andd(c,S,A,B) ppc_andx(c,S,A,B,1)
+
+#define ppc_andcx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (60 << 1) | RC)
+#define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0)
+#define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1)
+
+#define ppc_andid(c,S,A,d) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | (guint16)(d))
+#define ppc_andisd(c,S,A,d) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | (guint16)(d))
+
+#define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK)
+#define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0)
+#define ppc_bca(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,0)
+#define ppc_bcl(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,1)
+#define ppc_bcla(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,1)
+
+#define ppc_bcctrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (528 << 1) | LK)
+#define ppc_bcctr(c,BO,BI) ppc_bcctrx(c,BO,BI,0)
+#define ppc_bcctrl(c,BO,BI) ppc_bcctrx(c,BO,BI,1)
+
+#define ppc_bnectrp(c,BO,BI) ppc_bcctr(c,BO,BI)
+#define ppc_bnectrlp(c,BO,BI) ppc_bcctr(c,BO,BI)
+
+#define ppc_bclrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (16 << 1) | LK)
+#define ppc_bclr(c,BO,BI) ppc_bclrx(c,BO,BI,0)
+#define ppc_bclrl(c,BO,BI) ppc_bclrx(c,BO,BI,1)
+
+#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI)
+#define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI)
+
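All of the macros above expand to a single ppc_emit32() call, which (as in the earlier part of this header) stores one 32-bit instruction word at the cursor and advances it. A short sketch of how a caller strings them together, assuming this header is included and that the ppc_rN names come from its register enum; the register choices and the BO=20 "branch always" encoding are illustrative:

static guint8 *
emit_sample (guint8 *p)
{
	ppc_add (p, ppc_r3, ppc_r3, ppc_r4);   /* r3 <- r3 + r4 */
	ppc_addic (p, ppc_r3, ppc_r3, 1);      /* r3 <- r3 + 1, carry recorded in XER */
	ppc_andd (p, ppc_r3, ppc_r3, ppc_r5);  /* and.: r3 <- r3 & r5, cr0 updated */
	ppc_bclr (p, 20, 0);                   /* BO=20, BI=0: unconditional blr */
	return p;
}

Because each macro both reads and writes p, the cursor must be an lvalue, which is why the trampoline code threads a guint8 *p through every emit_* helper.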
+#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (0x00000 << 1) | 0 ) +#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | B) +#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (32 << 1) | 0 ) +#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | B) + +#define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc) +#define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0) +#define ppc_cntlzwd(c,S,A) ppc_cntlzwx(c,S,A,1) + +#define ppc_crand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (257 << 1) | 0) +#define ppc_crandc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (129 << 1) | 0) +#define ppc_creqv(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (289 << 1) | 0) +#define ppc_crnand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (225 << 1) | 0) +#define ppc_crnor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (33 << 1) | 0) +#define ppc_cror(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (449 << 1) | 0) +#define ppc_crorc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (417 << 1) | 0) +#define ppc_crxor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (193 << 1) | 0) + +#define ppc_dcba(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (758 << 1) | 0) +#define ppc_dcbf(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (86 << 1) | 0) +#define ppc_dcbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (470 << 1) | 0) +#define ppc_dcbst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (54 << 1) | 0) +#define ppc_dcbt(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (278 << 1) | 0) +#define ppc_dcbtst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (246 << 1) | 0) +#define ppc_dcbz(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (1014 << 1) | 0) + +#define ppc_divwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (491 << 1) | Rc) +#define ppc_divw(c,D,A,B) ppc_divwx(c,D,A,B,0,0) +#define ppc_divwd(c,D,A,B) ppc_divwx(c,D,A,B,0,1) +#define ppc_divwo(c,D,A,B) ppc_divwx(c,D,A,B,1,0) +#define ppc_divwod(c,D,A,B) ppc_divwx(c,D,A,B,1,1) + +#define ppc_divwux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (459 << 1) | Rc) +#define ppc_divwu(c,D,A,B) ppc_divwux(c,D,A,B,0,0) +#define ppc_divwud(c,D,A,B) ppc_divwux(c,D,A,B,0,1) +#define ppc_divwuo(c,D,A,B) ppc_divwux(c,D,A,B,1,0) +#define ppc_divwuod(c,D,A,B) ppc_divwux(c,D,A,B,1,1) + +#define ppc_eciwx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (310 << 1) | 0) +#define ppc_ecowx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (438 << 1) | 0) +#define ppc_eieio(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (854 << 1) | 0) + +#define ppc_eqvx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (284 << 1) | Rc) +#define ppc_eqv(c,A,S,B) ppc_eqvx(c,A,S,B,0) +#define ppc_eqvd(c,A,S,B) ppc_eqvx(c,A,S,B,1) + +#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 
<< 26) | (S << 21) | (A << 16) | (0 << 12) | (954 << 1) | Rc) +#define ppc_extsb(c,A,S) ppc_extsbx(c,A,S,0) +#define ppc_extsbd(c,A,S) ppc_extsbx(c,A,S,1) + +#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 12) | (922 << 1) | Rc) +#define ppc_extsh(c,A,S) ppc_extshx(c,A,S,0) +#define ppc_extshd(c,A,S) ppc_extshx(c,A,S,1) + +#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (264 << 1) | Rc) +#define ppc_fabs(c,D,B) ppc_fabsx(c,D,B,0) +#define ppc_fabsd(c,D,B) ppc_fabsx(c,D,B,1) + +#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (21 << 1) | Rc) +#define ppc_fadd(c,D,A,B) ppc_faddx(c,D,A,B,0) +#define ppc_faddd(c,D,A,B) ppc_faddx(c,D,A,B,1) + +#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (21 << 1) | Rc) +#define ppc_fadds(c,D,A,B) ppc_faddsx(c,D,A,B,0) +#define ppc_faddsd(c,D,A,B) ppc_faddsx(c,D,A,B,1) + +#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 12) | (32 << 1) | 0) +#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 12) | (0 << 1) | 0) + +#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (14 << 1) | Rc) +#define ppc_fctiw(c,D,B) ppc_fctiwx(c,D,B,0) +#define ppc_fctiwd(c,D,B) ppc_fctiwx(c,D,B,1) + +#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (15 << 1) | Rc) +#define ppc_fctiwz(c,D,B) ppc_fctiwzx(c,D,B,0) +#define ppc_fctiwzd(c,D,B) ppc_fctiwzx(c,D,B,1) + +#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (18 << 1) | Rc) +#define ppc_fdiv(c,D,A,B) ppc_fdivx(c,D,A,B,0) +#define ppc_fdivd(c,D,A,B) ppc_fdivx(c,D,A,B,1) + +#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (18 << 1) | Rc) +#define ppc_fdivs(c,D,A,B) ppc_fdivsx(c,D,A,B,0) +#define ppc_fdivsd(c,D,A,B) ppc_fdivsx(c,D,A,B,1) + +#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 12) | (C << 7) | (29 << 1) | Rc) +#define ppc_fmadd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,0) +#define ppc_fmaddd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,1) + +#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (C << 7) | (29 << 1) | Rc) +#define ppc_fmadds(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,0) +#define ppc_fmaddsd(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,1) + +#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (72 << 1) | Rc) +#define ppc_fmr(c,D,B) ppc_fmrx(c,D,B,0) +#define ppc_fmrd(c,D,B) ppc_fmrx(c,D,B,1) + +#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (C << 7) | (28 << 1) | Rc) +#define ppc_fmsub(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,0) +#define ppc_fmsubd(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,1) + +#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (28 << 1) | Rc) +#define ppc_fmsubs(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,0) +#define ppc_fmsubsd(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,1) + +#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 7) | (25 << 1) | Rc) +#define ppc_fmul(c,D,A,C) ppc_fmulx(c,D,A,C,0) +#define ppc_fmuld(c,D,A,C) ppc_fmulx(c,D,A,C,1) + +#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 
11) | (C << 7) | (25 << 1) | Rc) +#define ppc_fmuls(c,D,A,C) ppc_fmulsx(c,D,A,C,0) +#define ppc_fmulsd(c,D,A,C) ppc_fmulsx(c,D,A,C,1) + +#define ppc_fnabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (136 << 1) | Rc) +#define ppc_fnabs(c,D,B) ppc_fnabsx(c,D,B,0) +#define ppc_fnabsd(c,D,B) ppc_fnabsx(c,D,B,1) + +#define ppc_fnegx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (40 << 1) | Rc) +#define ppc_fneg(c,D,B) ppc_fnegx(c,D,B,0) +#define ppc_fnegd(c,D,B) ppc_fnegx(c,D,B,1) + +#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (31 << 1) | Rc) +#define ppc_fnmadd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,0) +#define ppc_fnmaddd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,1) + +#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (31 << 1) | Rc) +#define ppc_fnmadds(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,0) +#define ppc_fnmaddsd(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,1) + +#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (30 << 1) | Rc) +#define ppc_fnmsub(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,0) +#define ppc_fnmsubd(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,1) + +#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (30 << 1) | Rc) +#define ppc_fnmsubs(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,0) +#define ppc_fnmsubsd(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,1) + +#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (24 << 1) | Rc) +#define ppc_fres(c,D,B) ppc_fresx(c,D,B,0) +#define ppc_fresd(c,D,B) ppc_fresx(c,D,B,1) + +#define ppc_frspx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (12 << 1) | Rc) +#define ppc_frsp(c,D,B) ppc_frspx(c,D,B,0) +#define ppc_frspd(c,D,B) ppc_frspx(c,D,B,1) + +#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (26 << 1) | Rc) +#define ppc_frsqrte(c,D,B) ppc_frsqrtex(c,D,B,0) +#define ppc_frsqrted(c,D,B) ppc_frsqrtex(c,D,B,1) + +#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (23 << 1) | Rc) +#define ppc_fsel(c,D,A,C,B) ppc_fselx(c,D,A,C,B,0) +#define ppc_fseld(c,D,A,C,B) ppc_fselx(c,D,A,C,B,1) + +#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (22 << 1) | Rc) +#define ppc_fsqrt(c,D,B) ppc_fsqrtx(c,D,B,0) +#define ppc_fsqrtd(c,D,B) ppc_fsqrtx(c,D,B,1) + +#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (22 << 1) | Rc) +#define ppc_fsqrts(c,D,B) ppc_fsqrtsx(c,D,B,0) +#define ppc_fsqrtsd(c,D,B) ppc_fsqrtsx(c,D,B,1) + +#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 7) | (20 << 1) | Rc) +#define ppc_fsub(c,D,A,B) ppc_fsubx(c,D,A,B,0) +#define ppc_fsubd(c,D,A,B) ppc_fsubx(c,D,A,B,1) + +#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 7) | (20 << 1) | Rc) +#define ppc_fsubs(c,D,A,B) ppc_fsubsx(c,D,A,B,0) +#define ppc_fsubsd(c,D,A,B) ppc_fsubsx(c,D,A,B,1) + +#define ppc_icbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (982 << 1) | 0) + +#define ppc_isync(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (150 << 1) | 0) + +#define ppc_lbzu(c,D,A,d) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define 
ppc_lbzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (119 << 1) | 0) +#define ppc_lbzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (87 << 1) | 0) + +#define ppc_lfdu(c,D,A,d) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lfdux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (631 << 1) | 0) +#define ppc_lfdx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (599 << 1) | 0) + +#define ppc_lfsu(c,D,A,d) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lfsux(c,D,A,d) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (567 << 1) | 0) +#define ppc_lfsx(c,D,A,d) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (535 << 1) | 0) + +#define ppc_lha(c,D,A,d) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lhau(c,D,A,d) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lhaux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (375 << 1) | 0) +#define ppc_lhax(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (343 << 1) | 0) +#define ppc_lhbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (790 << 1) | 0) +#define ppc_lhzu(c,D,A,d) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d) + +#define ppc_lhzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (311 << 1) | 0) +#define ppc_lhzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (279 << 1) | 0) + +#define ppc_lmw(c,D,A,d) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d) + +#define ppc_lswi(c,D,A,NB) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (NB << 11) | (597 << 1) | 0) +#define ppc_lswx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (533 << 1) | 0) +#define ppc_lwarx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (20 << 1) | 0) +#define ppc_lwbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (534 << 1) | 0) + +#define ppc_lwzu(c,D,A,d) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lwzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (55 << 1) | 0) +#define ppc_lwzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (23 << 1) | 0) + +#define ppc_mcrf(c,crfD,crfS) ppc_emit32(c, (19 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | 0) +#define ppc_mcrfs(c,crfD,crfS) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | (0 << 16) | (64 << 1) | 0) +#define ppc_mcrxr(c,crfD) ppc_emit32(c, (31 << 26) | (crfD << 23) | (0 << 16) | (512 << 1) | 0) + +#define ppc_mfcr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (19 << 1) | 0) +#define ppc_mffsx(c,D,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (583 << 1) | Rc) +#define ppc_mffs(c,D) ppc_mffsx(c,D,0) +#define ppc_mffsd(c,D) ppc_mffsx(c,D,1) +#define ppc_mfmsr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (83 << 1) | 0) +#define ppc_mfsr(c,D,SR) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (595 << 1) | 0) +#define ppc_mfsrin(c,D,B) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (B << 11) | (659 << 1) | 0) +#define ppc_mftb(c,D,TBR) ppc_emit32(c, (31 << 26) | (D << 21) | (TBR << 11) | (371 << 1) | 0) + +#define ppc_mtcrf(c,CRM,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (CRM << 12) | (0 << 11) | (144 << 1) | 0) + 
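The load macros above come in pairs mirroring the two PowerPC addressing forms: the D-form variants (ppc_lwzu, ppc_lhzu, ...) fold a signed 16-bit displacement into the instruction word, while the X-form variants (ppc_lwzx, ppc_lwzux, ...) take a second index register, and the -u- spellings additionally write the effective address back into rA. A brief sketch contrasting them, with illustrative register and offset choices:

static guint8 *
emit_load_forms (guint8 *p)
{
	ppc_lwzu (p, ppc_r5, ppc_r9, 8);        /* lwzu: r5 <- [r9 + 8], then r9 <- r9 + 8 */
	ppc_lwzx (p, ppc_r6, ppc_r9, ppc_r10);  /* lwzx: r6 <- [r9 + r10] */
	ppc_lhbrx (p, ppc_r7, ppc_r9, ppc_r10); /* lhbrx: r7 <- byte-reversed halfword at [r9 + r10] */
	return p;
}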
+#define ppc_mtfsb0x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (70 << 1) | Rc) +#define ppc_mtfsb0(c,CRB) ppc_mtfsb0x(c,CRB,0) +#define ppc_mtfsb0d(c,CRB) ppc_mtfsb0x(c,CRB,1) + +#define ppc_mtfsb1x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (38 << 1) | Rc) +#define ppc_mtfsb1(c,CRB) ppc_mtfsb1x(c,CRB,0) +#define ppc_mtfsb1d(c,CRB) ppc_mtfsb1x(c,CRB,1) + +#define ppc_mtfsfx(c,FM,B,Rc) ppc_emit32(c, (63 << 26) | (0 << 25) | (FM << 22) | (0 << 21) | (B << 11) | (711 << 1) | Rc) +#define ppc_mtfsf(c,FM,B) ppc_mtfsfx(c,FM,B,0) +#define ppc_mtfsfd(c,FM,B) ppc_mtfsfx(c,FM,B,1) + +#define ppc_mtfsfix(c,crfD,IMM,Rc) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 16) | (IMM << 12) | (0 << 11) | (134 << 1) | Rc) +#define ppc_mtfsfi(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,0) +#define ppc_mtfsfid(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,1) + +#define ppc_mtmsr(c, S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 11) | (146 << 1) | 0) + +#define ppc_mtsr(c,SR,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (210 << 1) | 0) +#define ppc_mtsrin(c,S,B) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 16) | (B << 11) | (242 << 1) | 0) + +#define ppc_mulhwx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (75 << 1) | Rc) +#define ppc_mulhw(c,D,A,B) ppc_mulhwx(c,D,A,B,0) +#define ppc_mulhwd(c,D,A,B) ppc_mulhwx(c,D,A,B,1) + +#define ppc_mulhwux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (11 << 1) | Rc) +#define ppc_mulhwu(c,D,A,B) ppc_mulhwux(c,D,A,B,0) +#define ppc_mulhwud(c,D,A,B) ppc_mulhwux(c,D,A,B,1) + +#define ppc_mulli(c,D,A,SIMM) ppc_emit32(c, ((07) << 26) | (D << 21) | (A << 16) | (guint16)(SIMM)) + +#define ppc_mullwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (235 << 1) | Rc) +#define ppc_mullw(c,D,A,B) ppc_mullwx(c,D,A,B,0,0) +#define ppc_mullwd(c,D,A,B) ppc_mullwx(c,D,A,B,0,1) +#define ppc_mullwo(c,D,A,B) ppc_mullwx(c,D,A,B,1,0) +#define ppc_mullwod(c,D,A,B) ppc_mullwx(c,D,A,B,1,1) + +#define ppc_nandx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (476 << 1) | Rc) +#define ppc_nand(c,A,S,B) ppc_nandx(c,A,S,B,0) +#define ppc_nandd(c,A,S,B) ppc_nandx(c,A,S,B,1) + +#define ppc_negx(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (104 << 1) | Rc) +#define ppc_neg(c,D,A) ppc_negx(c,D,A,0,0) +#define ppc_negd(c,D,A) ppc_negx(c,D,A,0,1) +#define ppc_nego(c,D,A) ppc_negx(c,D,A,1,0) +#define ppc_negod(c,D,A) ppc_negx(c,D,A,1,1) + +#define ppc_norx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (124 << 1) | Rc) +#define ppc_nor(c,A,S,B) ppc_norx(c,A,S,B,0) +#define ppc_nord(c,A,S,B) ppc_norx(c,A,S,B,1) + +#define ppc_orx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (444 << 1) | Rc) +#define ppc_ord(c,A,S,B) ppc_orx(c,A,S,B,1) + +#define ppc_orcx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (412 << 1) | Rc) +#define ppc_orc(c,A,S,B) ppc_orcx(c,A,S,B,0) +#define ppc_orcd(c,A,S,B) ppc_orcx(c,A,S,B,1) + +#define ppc_oris(c,A,S,UIMM) ppc_emit32(c, (25 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM)) + +#define ppc_rfi(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (50 << 1) | 0) + +#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0) 
+#define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1) + +#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0) +#define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1) + +#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0) +#define ppc_rlwnmd(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,1) + +#define ppc_sc(c) ppc_emit32(c, (17 << 26) | (0 << 2) | (1 << 1) | 0) + +#define ppc_slwx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (24 << 1) | Rc) +#define ppc_slw(c,S,A,B) ppc_slwx(c,S,A,B,0) +#define ppc_slwd(c,S,A,B) ppc_slwx(c,S,A,B,1) + +#define ppc_srawx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (792 << 1) | Rc) +#define ppc_sraw(c,A,S,B) ppc_srawx(c,A,S,B,0) +#define ppc_srawd(c,A,S,B) ppc_srawx(c,A,S,B,1) + +#define ppc_srawix(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (824 << 1) | Rc) +#define ppc_srawi(c,A,S,B) ppc_srawix(c,A,S,B,0) +#define ppc_srawid(c,A,S,B) ppc_srawix(c,A,S,B,1) + +#define ppc_srwx(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (536 << 1) | Rc) +#define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0) +#define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1) + +#define ppc_stbu(c,S,A,D) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | D) + +#define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0) +#define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0) + +#define ppc_stfdu(c,S,A,D) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | D) + +#define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0) +#define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0) + +#define ppc_stfsu(c,S,A,D) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0) +#define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0) +#define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0) +#define ppc_sthu(c,S,A,D) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | D) +#define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0) +#define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0) +#define ppc_stmw(c,S,A,D) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0) +#define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0) +#define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0) +#define ppc_stwcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (150 << 1) | 1) +#define ppc_stwux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (183 << 1) | 0) +#define ppc_stwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (151 << 1) | 0) + +#define 
ppc_subfx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (40 << 1) | Rc) +#define ppc_subf(c,D,A,B) ppc_subfx(c,D,A,B,0,0) +#define ppc_subfd(c,D,A,B) ppc_subfx(c,D,A,B,0,1) +#define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0) +#define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1) + +#define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc) +#define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0) +#define ppc_subfcd(c,D,A,B) ppc_subfcx(c,D,A,B,0,1) +#define ppc_subfco(c,D,A,B) ppc_subfcx(c,D,A,B,1,0) +#define ppc_subfcod(c,D,A,B) ppc_subfcx(c,D,A,B,1,1) + +#define ppc_subfex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (136 << 1) | Rc) +#define ppc_subfe(c,D,A,B) ppc_subfex(c,D,A,B,0,0) +#define ppc_subfed(c,D,A,B) ppc_subfex(c,D,A,B,0,1) +#define ppc_subfeo(c,D,A,B) ppc_subfex(c,D,A,B,1,0) +#define ppc_subfeod(c,D,A,B) ppc_subfex(c,D,A,B,1,1) + +#define ppc_subfic(c,D,A,SIMM) ppc_emit32(c, (8 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM)) + +#define ppc_subfmex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (232 << 1) | Rc) +#define ppc_subfme(c,D,A) ppc_subfmex(c,D,A,0,0) +#define ppc_subfmed(c,D,A) ppc_subfmex(c,D,A,0,1) +#define ppc_subfmeo(c,D,A) ppc_subfmex(c,D,A,1,0) +#define ppc_subfmeod(c,D,A) ppc_subfmex(c,D,A,1,1) + +#define ppc_subfzex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (200 << 1) | Rc) +#define ppc_subfze(c,D,A) ppc_subfzex(c,D,A,0,0) +#define ppc_subfzed(c,D,A) ppc_subfzex(c,D,A,0,1) +#define ppc_subfzeo(c,D,A) ppc_subfzex(c,D,A,1,0) +#define ppc_subfzeod(c,D,A) ppc_subfzex(c,D,A,1,1) + +#define ppc_sync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (598 << 1) | 0) +#define ppc_tlbia(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (370 << 1) | 0) +#define ppc_tlbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 16) | (B << 11) | (306 << 1) | 0) +#define ppc_tlbsync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (566 << 1) | 0) + +#define ppc_tw(c,TO,A,B) ppc_emit32(c, (31 << 26) | (TO << 21) | (A << 16) | (B << 11) | (4 << 1) | 0) +#define ppc_twi(c,TO,A,SIMM) ppc_emit32(c, (3 << 26) | (TO << 21) | (A << 16) | (guint16)(SIMM)) + +#define ppc_xorx(c,A,S,B,RC) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (316 << 1) | RC) +#define ppc_xor(c,A,S,B) ppc_xorx(c,A,S,B,0) +#define ppc_xord(c,A,S,B) ppc_xorx(c,A,S,B,1) + +#define ppc_xori(c,S,A,SIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(SIMM)) +#define ppc_xoris(c,S,A,SIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(SIMM)) + +/* this marks the end of my work, ct */ #endif -- cgit v1.1 From cc3953655f65398b40e11fdcc97b1ae47bebfdc1 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 27 Jan 2003 11:54:14 +0000 Subject: Mon Jan 27 12:49:10 CET 2003 Paolo Molaro * alpha/*: start of the port to the alpha architecture by Laramie Leavitt (). 
svn path=/trunk/mono/; revision=10942 --- ChangeLog | 5 + Makefile.am | 2 +- alpha/Makefile.am | 8 + alpha/alpha-codegen.h | 489 ++++++++++++++++++++++++++++++++++++++++++++++++++ alpha/test.c | 120 +++++++++++++ alpha/tramp.c | 16 ++ 6 files changed, 639 insertions(+), 1 deletion(-) create mode 100644 alpha/Makefile.am create mode 100644 alpha/alpha-codegen.h create mode 100644 alpha/test.c create mode 100644 alpha/tramp.c diff --git a/ChangeLog b/ChangeLog index d15d142..5fc4f6c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Mon Jan 27 12:49:10 CET 2003 Paolo Molaro + + * alpha/*: start of the port to the alpha architecture by + Laramie Leavitt (). + Tue Jan 21 17:29:53 CET 2003 Paolo Molaro * ppc/ppc-codegen.h: completed ppc native code generation by diff --git a/Makefile.am b/Makefile.am index 1a5ad6a..4f68e74 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 +DIST_SUBDIRS = x86 ppc sparc arm s390 alpha INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/alpha/Makefile.am b/alpha/Makefile.am new file mode 100644 index 0000000..8e0accf --- /dev/null +++ b/alpha/Makefile.am @@ -0,0 +1,8 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-alpha.la + +libmonoarch_alpha_la_SOURCES = tramp.c alpha-codegen.h + +noinst_PROGRAMS = test diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h new file mode 100644 index 0000000..aa27179 --- /dev/null +++ b/alpha/alpha-codegen.h @@ -0,0 +1,489 @@ +#ifndef __ALPHA_CODEGEN_H__ +#define __ALPHA_CODEGEN_H__ + +/* + http://ftp.digital.com/pub/Digital/info/semiconductor/literature/alphaahb.pdf +*/ + +typedef enum { + alpha_r0 = 0, + alpha_r1 = 1, + alpha_r2 = 2, + alpha_r3 = 3, + alpha_r4 = 4, + alpha_r5 = 5, + alpha_r6 = 6, + alpha_r7 = 7, + alpha_r8 = 8, + alpha_r9 = 9, + alpha_r10 = 10, + alpha_r11 = 11, + alpha_r12 = 12, + alpha_r13 = 13, + alpha_r14 = 14, + alpha_r15 = 15, + alpha_r16 = 16, + alpha_r17 = 17, + alpha_r18 = 18, + alpha_r19 = 19, + alpha_r20 = 20, + alpha_r21 = 21, + alpha_r22 = 22, + alpha_r23 = 23, + alpha_r24 = 24, + alpha_r25 = 25, + alpha_r26 = 26, + alpha_r27 = 27, + alpha_r28 = 28, + alpha_r29 = 29, + alpha_r30 = 30, + alpha_r31 = 31, alpha_zero = 31, + /* aliases */ + alpha_v0 = 0, /* return value */ + + alpha_t0 = 1, /* temporaries */ + alpha_t1 = 2, + alpha_t2 = 3, + alpha_t3 = 4, + alpha_t4 = 5, + alpha_t5 = 6, + alpha_t6 = 7, + alpha_t7 = 8, + + alpha_s0 = 9, /* saved registers */ + alpha_s1 = 10, + alpha_s2 = 11, + alpha_s3 = 12, + alpha_s4 = 13, + alpha_s5 = 14, + alpha_s6 = 15, + + alpha_a0 = 16, /* argument registers */ + alpha_a1 = 17, + alpha_a2 = 18, + alpha_a3 = 19, + alpha_a4 = 20, + alpha_a5 = 21, + alpha_t8 = 22, + alpha_t9 = 23, + alpha_t10 = 24, + alpha_t11 = 25, + alpha_ra = 26, /* Return Address */ + alpha_t12 = 27, + alpha_altreg = 28, + alpha_gp = 29, /* Global Pointer */ + alpha_sp = 30, /* Stack Pointer */ +} AlphaRegister; + +typedef enum { + /* floating point registers */ + alpha_f0 = 0, + alpha_f1 = 1, + alpha_f2 = 2, + alpha_f3 = 3, + alpha_f4 = 4, + alpha_f5 = 5, + alpha_f6 = 6, + alpha_f7 = 7, + alpha_f8 = 8, + alpha_f9 = 9, + alpha_f10 = 10, + alpha_f11 = 11, + alpha_f12 = 12, + alpha_f13 = 13, + alpha_f14 = 14, + alpha_f15 = 15, + alpha_f16 = 16, + alpha_f17 = 17, + alpha_f18 = 18, + alpha_f19 = 19, + alpha_f20 = 20, + alpha_f21 = 21, + alpha_f22 = 22, + alpha_f23 = 23, + alpha_f24 = 24, + alpha_f25 = 25, + alpha_f26 = 26, + alpha_f27 = 27, + alpha_f28 = 28, + alpha_f29 = 29, + 
alpha_f30 = 30, + alpha_f31 = 31, alpha_fzero = 31, + /* aliases */ + alpha_fv0 = 0, /* return value */ + alpha_fv1 = 1, + + alpha_fs0 = 2, /* saved registers */ + alpha_fs1 = 3, + alpha_fs2 = 4, + alpha_fs3 = 5, + alpha_fs4 = 6, + alpha_fs5 = 7, + alpha_fs6 = 8, + alpha_fs7 = 9, + + alpha_ft0 = 10, /* temporary */ + alpha_ft1 = 11, + alpha_ft2 = 12, + alpha_ft3 = 13, + alpha_ft4 = 14, + alpha_ft5 = 15, + + alpha_fa0 = 16, /* args */ + alpha_fa1 = 17, + alpha_fa2 = 18, + alpha_fa3 = 19, + alpha_fa4 = 20, + alpha_fa5 = 21, + + alpha_ft6 = 22, + alpha_ft7 = 23, + alpha_ft8 = 24, + alpha_ft9 = 25, + alpha_ft10 = 26, + alpha_ft11 = 27, + alpha_ft12 = 28, + alpha_ft13 = 29, + alpha_ft14 = 30 +} AlphaFPRegister; + +/***************************************/ + +#define __alpha_int_32 unsigned int + + +/***************************************/ +#define AXP_OFF26_MASK 0x03ffffff +#define AXP_OFF21_MASK 0x01fffff +#define AXP_OFF16_MASK 0x0ffff +#define AXP_OFF14_MASK 0x03fff +#define AXP_OFF13_MASK 0x01fff +#define AXP_OFF11_MASK 0x07ff +#define AXP_OFF8_MASK 0x0ff +#define AXP_OFF7_MASK 0x07f +#define AXP_OFF6_MASK 0x03f +#define AXP_OFF5_MASK 0x01f +#define AXP_OFF4_MASK 0x0f +#define AXP_OFF2_MASK 0x03 +#define AXP_OFF1_MASK 0x01 + + +#define AXP_REG_MASK AXP_OFF5_MASK +#define AXP_REGSIZE 5 + +#define AXP_OP_SHIFT 26 +#define AXP_REG1_SHIFT 21 +#define AXP_REG2_SHIFT 16 +#define AXP_MEM_BR_SHIFT 14 +#define AXP_LIT_SHIFT 13 + +#define alpha_opcode( op ) \ + ((op&AXP_OFF6_MASK) << AXP_OP_SHIFT) + +#define alpha_reg1( reg ) \ + ((reg & AXP_REG_MASK) << AXP_REG1_SHIFT) + +#define alpha_reg2( reg ) \ + ((reg & AXP_REG_MASK) << AXP_REG2_SHIFT) + +#define alpha_reg3( reg ) \ + (reg & AXP_REG_MASK) + +#define alpha_fp_func( func ) \ + ((func & AXP_OFF11_MASK) << AXP_REGSIZE) + +#define alpha_op_func( func ) \ + ((func & AXP_OFF7_MASK) << AXP_REGSIZE) + +#define alpha_op_literal( lit ) \ + ((lit & AXP_OFF7_MASK) << AXP_LIT_SHIFT) + +#define alpha_mem_br_func( func, hint ) \ + (((func & AXP_OFF2_MASK ) << AXP_MEM_BR_SHIFT ) | (hint&AXP_OFF14_MASK)) + +#define alpha_mem_fc_func( func ) \ + (func && AXP_OFF16_MASK) + + + +#define alpha_encode_hw4_mem( op, func ) \ + (alpha_opcode( op ) | (( func & 0x0f ) << 12)) + +#define alpha_encode_hw5_mem( op, func ) \ + (alpha_opcode( op ) | (( func & 0x3f ) << 10)) + +#define alpha_encode_hw6mem( op, func ) \ + (alpha_opcode( op ) | (( func & 0x0f ) << 12)) + +#define alpha_encode_hw6mem_br( op, func ) \ + (alpha_opcode( op ) | (( func & 0x07 ) << 13)) + + +/*****************************************/ + + +#define alpha_encode_palcall( ins, op, func ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | ( func & AXP_OFF26_MASK )) + +#define alpha_encode_mem( ins, op, Rdest, Rsrc, offset ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rdest ) | \ + alpha_reg2( Rsrc ) | (offset & AXP_OFF16_MASK )) + +#define alpha_encode_mem_fc( ins, op, func, Rdest, Rsrc, offset ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rdest ) | \ + alpha_reg2( Rsrc ) | alpha_mem_fc_func( func )) + +#define alpha_encode_mem_br( ins, op, func, Rdest, Rsrc, hint ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rdest ) | \ + alpha_reg2( Rsrc ) | alpha_mem_br_func( func, hint ) ) + +#define alpha_encode_branch( ins, op, Reg, offset ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Reg ) | \ + (offset & AXP_OFF21_MASK )) + +#define alpha_encode_op( ins, op, func, Rsrc1, 
Rsrc2, Rdest ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ + alpha_reg2( Rsrc2 ) | alpha_op_func( func ) | \ + alpha_reg3( Rdest )) + + +#define alpha_encode_opl( ins, op, func, Rsrc, lit, Rdest ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ + alpha_op_literal(lit) | ( 1 << 12 ) | \ + alpha_op_func( func ) | alpha_reg3( Rdest ) ) + + +#define alpha_encode_fpop( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ + *((__alpha_int_32*)(ins))++ = ( 0 |\ + alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ + alpha_reg2( Rsrc2 ) | alpha_fp_func( func ) | \ + alpha_reg3( Rdest )) + + +#define alpha_reg_zero 31 + +/***************************************/ + +/* pal calls */ +/* #define alpha_halt( ins ) alpha_encode_palcall( ins, 0, 0 ) */ + +#define alpha_call_pal( ins, func ) alpha_encode_palcall( ins, 0, x ) + +/*memory*/ +#define alpha_lda( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x08, Rdest, Rsrc, offset ) +#define alpha_ldah( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x09, Rdest, Rsrc, offset ) +#define alpha_ldbu( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0a, Rdest, Rsrc, offset ) +#define alpha_ldq_u( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0b, Rdest, Rsrc, offset ) +#define alpha_ldwu( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0c, Rdest, Rsrc, offset ) +#define alpha_stw( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0d, Rdest, Rsrc, offset ) +#define alpha_stb( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0e, Rdest, Rsrc, offset ) +#define alpha_stq_u( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0f, Rdest, Rsrc, offset ) + +#define alpha_ldf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x20, Rdest, Rsrc, offset ) +#define alpha_ldg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x21, Rdest, Rsrc, offset ) +#define alpha_lds( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x22, Rdest, Rsrc, offset ) +#define alpha_ldt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) +#define alpha_stf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x24, Rdest, Rsrc, offset ) +#define alpha_stg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x25, Rdest, Rsrc, offset ) +#define alpha_sts( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x26, Rdest, Rsrc, offset ) +#define alpha_stt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x27, Rdest, Rsrc, offset ) + +#define alpha_ldl( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x28, Rdest, Rsrc, offset ) +#define alpha_ldq( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x29, Rdest, Rsrc, offset ) +#define alpha_ldl_l( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2A, Rdest, Rsrc, offset ) +#define alpha_ldq_l( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2B, Rdest, Rsrc, offset ) +#define alpha_stl( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2C, Rdest, Rsrc, offset ) +#define alpha_stq( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2D, Rdest, Rsrc, offset ) +#define alpha_stl_c( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2E, Rdest, Rsrc, offset ) +#define alpha_stq_c( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2F, Rdest, Rsrc, offset ) + + +/* branch*/ +#define alpha_jmp( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x0, alpha_reg_zero, Rsrc, hint ) +#define alpha_jsr( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x1, alpha_reg_zero, Rsrc, hint ) +#define alpha_ret( ins, Rsrc, hint ) alpha_encode_mem_br( 
ins, 0x1A, 0x2, alpha_reg_zero, Rsrc, hint ) +#define alpha_jsrco( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x3, alpha_reg_zero, Rsrc, hint ) + +#define alpha_br( ins, Reg, offset ) alpha_encode_branch( ins, 0x30, Reg, offset ) +#define alpha_fbeq( ins, Reg, offset ) alpha_encode_branch( ins, 0x31, Reg, offset ) +#define alpha_fblt( ins, Reg, offset ) alpha_encode_branch( ins, 0x32, Reg, offset ) +#define alpha_fble( ins, Reg, offset ) alpha_encode_branch( ins, 0x33, Reg, offset ) +#define alpha_bsr( ins, Reg, offset ) alpha_encode_branch( ins, 0x34, Reg, offset ) +#define alpha_fbne( ins, Reg, offset ) alpha_encode_branch( ins, 0x35, Reg, offset ) +#define alpha_fbge( ins, Reg, offset ) alpha_encode_branch( ins, 0x36, Reg, offset ) +#define alpha_fbgt( ins, Reg, offset ) alpha_encode_branch( ins, 0x37, Reg, offset ) +#define alpha_blbc( ins, Reg, offset ) alpha_encode_branch( ins, 0x38, Reg, offset ) +#define alpha_beq( ins, Reg, offset ) alpha_encode_branch( ins, 0x39, Reg, offset ) +#define alpha_blt( ins, Reg, offset ) alpha_encode_branch( ins, 0x3A, Reg, offset ) +#define alpha_ble( ins, Reg, offset ) alpha_encode_branch( ins, 0x3B, Reg, offset ) +#define alpha_blbs( ins, Reg, offset ) alpha_encode_branch( ins, 0x3C, Reg, offset ) +#define alpha_bne( ins, Reg, offset ) alpha_encode_branch( ins, 0x3D, Reg, offset ) +#define alpha_bge( ins, Reg, offset ) alpha_encode_branch( ins, 0x3E, Reg, offset ) +#define alpha_bgt( ins, Reg, offset ) alpha_encode_branch( ins, 0x3F, Reg, offset ) + + +/* integer */ +/*//#define alpha_sextl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x00, Rsrc1, Rsrc2, Rdest ) +//#define alpha_sextl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x00, Rsrc1, lit, Rdest ) +*/ +#define alpha_addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x00, Rsrc1, Rsrc2, Rdest ) +#define alpha_addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x00, Rsrc1, lit, Rdest ) +#define alpha_s4addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x02, Rsrc1, Rsrc2, Rdest ) +#define alpha_s4addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x02, Rsrc1, lit, Rdest ) +//#define alpha_negl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x09, Rsrc1, Rsrc2, Rdest ) +//#define alpha_negl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x09, Rsrc1, lit, Rdest ) +#define alpha_subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x09, Rsrc1, Rsrc2, Rdest ) +#define alpha_subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x09, Rsrc1, lit, Rdest ) +#define alpha_s4subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x0B, Rsrc1, Rsrc2, Rdest ) +#define alpha_s4subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x0B, Rsrc1, lit, Rdest ) +#define alpha_cmpbge( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x0F, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmpbge_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x0F, Rsrc1, lit, Rdest ) +#define alpha_s8addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x12, Rsrc1, Rsrc2, Rdest ) +#define alpha_s8addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x12, Rsrc1, lit, Rdest ) +#define alpha_s8subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x1B, Rsrc1, Rsrc2, Rdest ) +#define alpha_s8subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x1B, Rsrc1, lit, Rdest ) +#define alpha_cmpult( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x1d, Rsrc1, Rsrc2, Rdest ) +#define 
alpha_cmpult_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x1d, Rsrc1, lit, Rdest ) +#define alpha_addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x20, Rsrc1, Rsrc2, Rdest ) +#define alpha_addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x20, Rsrc1, lit, Rdest ) +#define alpha_s4addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x22, Rsrc1, Rsrc2, Rdest ) +#define alpha_s4addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x22, Rsrc1, lit, Rdest ) +//#define alpha_negq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x29, Rsrc1, Rsrc2, Rdest ) +//#define alpha_negq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x29, Rsrc1, lit, Rdest ) +#define alpha_subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x29, Rsrc1, Rsrc2, Rdest ) +#define alpha_subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x29, Rsrc1, lit, Rdest ) +#define alpha_s4subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x2B, Rsrc1, Rsrc2, Rdest ) +#define alpha_s4subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x2B, Rsrc1, lit, Rdest ) +#define alpha_cmpeq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x2D, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmpeq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x2D, Rsrc1, lit, Rdest ) +#define alpha_s8addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x32, Rsrc1, Rsrc2, Rdest ) +#define alpha_s8addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x32, Rsrc1, lit, Rdest ) +#define alpha_s8subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x3B, Rsrc1, Rsrc2, Rdest ) +#define alpha_s8subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x3B, Rsrc1, lit, Rdest ) +#define alpha_cmpule( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x3D, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmpule_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x3D, Rsrc1, lit, Rdest ) +#define alpha_addlv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x40, Rsrc1, Rsrc2, Rdest ) +#define alpha_addlv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x40, Rsrc1, lit, Rdest ) +//#define alpha_neglv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x49, Rsrc1, Rsrc2, Rdest ) +//#define alpha_neglv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x49, Rsrc1, lit, Rdest ) +#define alpha_sublv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x49, Rsrc1, Rsrc2, Rdest ) +#define alpha_sublv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x49, Rsrc1, lit, Rdest ) +#define alpha_cmplt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x4D, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmplt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x4D, Rsrc1, lit, Rdest ) +#define alpha_addqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x60, Rsrc1, Rsrc2, Rdest ) +#define alpha_addqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x60, Rsrc1, lit, Rdest ) +//#define alpha_negqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x69, Rsrc1, Rsrc2, Rdest ) +//#define alpha_negqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x69, Rsrc1, lit, Rdest ) +#define alpha_subqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x69, Rsrc1, Rsrc2, Rdest ) +#define alpha_subqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x69, Rsrc1, lit, Rdest ) +#define alpha_cmple( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x6D, Rsrc1, Rsrc2, Rdest ) +#define 
alpha_cmple_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x6D, Rsrc1, lit, Rdest ) + +#define alpha_and( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x00, Rsrc1, Rsrc2, Rdest ) +#define alpha_and_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x00, Rsrc1, lit, Rdest ) +//#define alpha_andnot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x08, Rsrc1, Rsrc2, Rdest ) +//#define alpha_andnot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x08, Rsrc1, lit, Rdest ) +#define alpha_bic( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x08, Rsrc1, Rsrc2, Rdest ) +#define alpha_bic_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x08, Rsrc1, lit, Rdest ) +#define alpha_cmovlbs( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x14, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovlbs_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x14, Rsrc1, lit, Rdest ) +#define alpha_cmovlbc( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x16, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovlbc_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x16, Rsrc1, lit, Rdest ) +#define alpha_nop( ins ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, alpha_reg_zero, alpha_reg_zero ) +#define alpha_clr( ins, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, alpha_reg_zero, Rdest ) +#define alpha_mov1( ins, Rsrc, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, Rsrc, Rdest ) +#define alpha_mov2( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) +#define alpha_mov_( ins, lit, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, lit, Rdest ) +//#define alpha_or( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) +//#define alpha_or_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x20, Rsrc1, lit, Rdest ) +#define alpha_bis( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) +#define alpha_bis_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x20, Rsrc1, lit, Rdest ) +#define alpha_cmoveq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x24, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmoveq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x24, Rsrc1, lit, Rdest ) +#define alpha_cmovne( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x26, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovne_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x26, Rsrc1, lit, Rdest ) +#define alpha_not( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, alpha_reg_zero, Rsrc2, Rdest ) +#define alpha_not_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, alpha_reg_zero, lit, Rdest ) +#define alpha_ornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, Rsrc1, Rsrc2, Rdest ) +#define alpha_ornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, Rsrc1, lit, Rdest ) +#define alpha_xor( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x40, Rsrc1, Rsrc2, Rdest ) +#define alpha_xor_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x40, Rsrc1, lit, Rdest ) +#define alpha_cmovlt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x44, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovlt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x44, Rsrc1, lit, Rdest ) +#define alpha_cmovge( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x46, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovge_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x46, Rsrc1, lit, Rdest ) +#define 
alpha_eqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x48, Rsrc1, Rsrc2, Rdest ) +#define alpha_eqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) +//#define alpha_xornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x48, Rsrc1, Rsrc2, Rdest ) +//#define alpha_xornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) +#define alpha_ev56b_amask( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x61, alpha_reg_zero, Rsrc2, Rdest ) +#define alpha_ev56b_amask_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x61, alpha_reg_zero, lit, Rdest ) +#define alpha_cmovle( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x64, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovle_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x64, Rsrc1, lit, Rdest ) +#define alpha_cmovgt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x66, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovgt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x66, Rsrc1, lit, Rdest ) +//#define alpha_implver_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x6C, Rsrc1, lit, Rdest ) +#define alpha_cmovgt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x66, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmovgt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x66, Rsrc1, lit, Rdest ) + +#define alpha_mskbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x02, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x02, Rsrc1, lit, Rdest ) +#define alpha_extbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x06, Rsrc1, Rsrc2, Rdest ) +#define alpha_extbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x06, Rsrc1, lit, Rdest ) +#define alpha_insbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x0B, Rsrc1, Rsrc2, Rdest ) +#define alpha_insbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x0B, Rsrc1, lit, Rdest ) +#define alpha_mskwl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x12, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskwl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x12, Rsrc1, lit, Rdest ) +#define alpha_extwl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x16, Rsrc1, Rsrc2, Rdest ) +#define alpha_extwl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x16, Rsrc1, lit, Rdest ) +#define alpha_inswl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x1b, Rsrc1, Rsrc2, Rdest ) +#define alpha_inswl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x1b, Rsrc1, lit, Rdest ) +#define alpha_mskll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x22, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x22, Rsrc1, lit, Rdest ) +#define alpha_extll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x26, Rsrc1, Rsrc2, Rdest ) +#define alpha_extll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x26, Rsrc1, lit, Rdest ) +#define alpha_insll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x2b, Rsrc1, Rsrc2, Rdest ) +#define alpha_insll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x2b, Rsrc1, lit, Rdest ) +#define alpha_zap( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x30, Rsrc1, Rsrc2, Rdest ) +#define alpha_zap_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x30, Rsrc1, lit, Rdest ) +#define alpha_zapnot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x31, Rsrc1, Rsrc2, Rdest ) 
+#define alpha_zapnot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x31, Rsrc1, lit, Rdest ) +#define alpha_mskql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x32, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x32, Rsrc1, lit, Rdest ) +#define alpha_srl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x34, Rsrc1, Rsrc2, Rdest ) +#define alpha_srl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x34, Rsrc1, lit, Rdest ) +#define alpha_extql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x36, Rsrc1, Rsrc2, Rdest ) +#define alpha_extql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x36, Rsrc1, lit, Rdest ) +#define alpha_sll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x39, Rsrc1, Rsrc2, Rdest ) +#define alpha_sll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x39, Rsrc1, lit, Rdest ) +#define alpha_insql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x3b, Rsrc1, Rsrc2, Rdest ) +#define alpha_insql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x3b, Rsrc1, lit, Rdest ) +#define alpha_sra( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x3c, Rsrc1, Rsrc2, Rdest ) +#define alpha_sra_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x3c, Rsrc1, lit, Rdest ) +#define alpha_mskwh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x52, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskwh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x52, Rsrc1, lit, Rdest ) +#define alpha_inswh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x57, Rsrc1, Rsrc2, Rdest ) +#define alpha_inswh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x57, Rsrc1, lit, Rdest ) +#define alpha_extwh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x5a, Rsrc1, Rsrc2, Rdest ) +#define alpha_extwh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x5a, Rsrc1, lit, Rdest ) +#define alpha_msklh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x62, Rsrc1, Rsrc2, Rdest ) +#define alpha_msklh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x62, Rsrc1, lit, Rdest ) +#define alpha_inslh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x67, Rsrc1, Rsrc2, Rdest ) +#define alpha_inslh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x67, Rsrc1, lit, Rdest ) +#define alpha_extlh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x6a, Rsrc1, Rsrc2, Rdest ) +#define alpha_extlh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x6a, Rsrc1, lit, Rdest ) +#define alpha_mskqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x72, Rsrc1, Rsrc2, Rdest ) +#define alpha_mskqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x72, Rsrc1, lit, Rdest ) +#define alpha_insqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x77, Rsrc1, Rsrc2, Rdest ) +#define alpha_insqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x77, Rsrc1, lit, Rdest ) +#define alpha_extqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x7a, Rsrc1, Rsrc2, Rdest ) +#define alpha_extqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x7a, Rsrc1, lit, Rdest ) + +#endif diff --git a/alpha/test.c b/alpha/test.c new file mode 100644 index 0000000..f5ed45b --- /dev/null +++ b/alpha/test.c @@ -0,0 +1,120 @@ +#include "alpha-codegen.h" + +#include +#include +#include +#include + + +// +// Simple function which returns 10. 
+// +char * write_testfunc_1( char * p ) +{ +//00000001200004d0 : +// 1200004d0: f0 ff de 23 lda sp,-16(sp) +// 1200004d4: 00 00 5e b7 stq ra,0(sp) +// 1200004d8: 08 00 fe b5 stq fp,8(sp) +// 1200004dc: 0f 04 fe 47 mov sp,fp +// 1200004e0: 0a 00 3f 20 lda t0,10 +// 1200004e4: 00 04 e1 47 mov t0,v0 +// 1200004e8: 1e 04 ef 47 mov fp,sp +// 1200004ec: 00 00 5e a7 ldq ra,0(sp) +// 1200004f0: 08 00 fe a5 ldq fp,8(sp) +// 1200004f4: 10 00 de 23 lda sp,16(sp) +// 1200004f8: 01 80 fa 6b ret + + +// 1200004d0: f0 ff de 23 lda sp,-16(sp) + *p++ = 0xf0; *p++ = 0xff;*p++ = 0xde;*p++ = 0x23; +// 1200004d4: 00 00 5e b7 stq ra,0(sp) + *p++ = 0x00; *p++ = 0x00;*p++ = 0x5e;*p++ = 0xb7; +// 1200004d8: 08 00 fe b5 stq fp,8(sp) + *p++ = 0x08; *p++ = 0x00;*p++ = 0xfe;*p++ = 0xb5; +// 1200004dc: 0f 04 fe 47 mov sp,fp + *p++ = 0x0f; *p++ = 0x04;*p++ = 0xfe;*p++ = 0x47; +// 1200004e0: 0a 00 3f 20 lda t0,10 + *p++ = 0x0a; *p++ = 0x00;*p++ = 0x3f;*p++ = 0x20; +// 1200004e4: 00 04 e1 47 mov t0,v0 + *p++ = 0x00; *p++ = 0x04;*p++ = 0xe1;*p++ = 0x47; +// 1200004e8: 1e 04 ef 47 mov fp,sp + *p++ = 0x1e; *p++ = 0x04;*p++ = 0xef;*p++ = 0x47; +// 1200004ec: 00 00 5e a7 ldq ra,0(sp) + *p++ = 0x00; *p++ = 0x00;*p++ = 0x5e;*p++ = 0xa7; +// 1200004f0: 08 00 fe a5 ldq fp,8(sp) + *p++ = 0x08; *p++ = 0x00;*p++ = 0xfe;*p++ = 0xa5; +// 1200004f4: 10 00 de 23 lda sp,16(sp) + *p++ = 0x10; *p++ = 0x00;*p++ = 0xde;*p++ = 0x23; +// 1200004f8: 01 80 fa 6b ret + *p++ = 0x01; *p++ = 0x80;*p++ = 0xfa;*p++ = 0x6b; + return p; +} + +#define t0 1 +#define t1 2 +#define a0 16 +#define sp 30 +#define gp 29 +#define t12 27 +#define ra 26 +#define v0 0 + +// The same function encoded with alpha-codegen.h +// Still needs work on the fp insns. +// +char * write_testfunc_2( char * p ) +{ + alpha_lda( p, sp, sp, -16 ); + alpha_stq( p, ra, sp, 0 ); +// alpha_stq( p, fp, sp, 8 ); +// alpha_mov1( p, sp, fp ); + alpha_lda( p, t0, alpha_reg_zero, 10 ); + alpha_mov1( p, t0, v0 ); +// alpha_mov1( p, sp, fp ); + alpha_ldq( p, ra, sp, 0 ); +// alpha_ldq( p, fp, sp, 8 ); + alpha_lda( p, sp, sp, 16 ); + + alpha_ret( p, ra, 1 ); + + return p; +} + + +void output( char * p, int len ) +{ + int fd = open( "bad.out", O_CREAT | O_TRUNC ); + write( fd, p, len ); + close( fd ); +} + +int main( int argc, char ** argv ) { + char code [16000]; + char *p = code; + char * cp; + + printf( "%d", sizeof( unsigned int ) ); + + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + + // so, `test blah` gets you the byte-encoded function. + // and `test` gets you the alpha-codegen.h encoded function. + + if( argc > 1 ) + { + p = write_testfunc_1( p ); + } + else + { + p = write_testfunc_2( p ); + } + + for (cp = code; cp < p; cp++) + { + printf (".byte 0x%0.2x\n", (*cp&0x00ff) ); + } + + output( code, p-code ); + + return 0; +} diff --git a/alpha/tramp.c b/alpha/tramp.c new file mode 100644 index 0000000..4f01c90 --- /dev/null +++ b/alpha/tramp.c @@ -0,0 +1,16 @@ +#include "mono/interpreter/interp.h" + +MonoPIFunc +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +{ + g_error ("Unsupported arch"); + return NULL; +} + +void * +mono_create_method_pointer (MonoMethod *method) +{ + g_error ("Unsupported arch"); + return NULL; +} + -- cgit v1.1 From f468e62377dfe3079f5b2bade1f43d239842e381 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 1 Feb 2003 10:02:52 +0000 Subject: Sat Feb 1 10:59:31 CET 2003 Paolo Molaro * alpha/*: update from Laramie. 
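A handy cross-check on the encoder before the update below: reproduce one of the objdump words from test.c by hand. With the field layout from alpha-codegen.h (opcode << 26, Ra << 21, Rb << 16, 16-bit displacement), `lda sp,-16(sp)` should come out as 0x23defff0, which is exactly the little-endian byte sequence `f0 ff de 23` quoted in write_testfunc_1 above:

#include <stdio.h>

int
main (void)
{
	/* lda is opcode 0x08; sp is register 30 (see alpha-codegen.h) */
	unsigned int op = 0x08, ra = 30, rb = 30;
	int disp = -16;
	unsigned int word = (op << 26) | (ra << 21) | (rb << 16) | (disp & 0xffff);
	printf ("0x%08x\n", word);	/* prints 0x23defff0 */
	return 0;
}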
svn path=/trunk/mono/; revision=11090 --- ChangeLog | 4 + alpha/alpha-codegen.h | 103 +++++++++++++-------- alpha/test.c | 148 ++++++++++++++++++------------ alpha/tramp.c | 244 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 403 insertions(+), 96 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5fc4f6c..db9d34a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Sat Feb 1 10:59:31 CET 2003 Paolo Molaro + + * alpha/*: update from Laramie. + Mon Jan 27 12:49:10 CET 2003 Paolo Molaro * alpha/*: start of the port to the alpha architecture by diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index aa27179..26dc591 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -57,6 +57,8 @@ typedef enum { alpha_s4 = 13, alpha_s5 = 14, alpha_s6 = 15, + + alpha_fp = 15, /* frame pointer */ alpha_a0 = 16, /* argument registers */ alpha_a1 = 17, @@ -64,13 +66,20 @@ typedef enum { alpha_a3 = 19, alpha_a4 = 20, alpha_a5 = 21, - alpha_t8 = 22, + + alpha_t8 = 22, /* temporaries */ alpha_t9 = 23, alpha_t10 = 24, alpha_t11 = 25, + alpha_ra = 26, /* Return Address */ - alpha_t12 = 27, + + alpha_pv = 27, /* pv current procedure */ + alpha_t12 = 27, /* temp 12 */ + alpha_altreg = 28, + alpha_at = 28, + alpha_gp = 29, /* Global Pointer */ alpha_sp = 30, /* Stack Pointer */ } AlphaRegister; @@ -177,18 +186,23 @@ typedef enum { #define AXP_MEM_BR_SHIFT 14 #define AXP_LIT_SHIFT 13 +/* encode registers */ #define alpha_opcode( op ) \ ((op&AXP_OFF6_MASK) << AXP_OP_SHIFT) -#define alpha_reg1( reg ) \ +/* encode registers */ +#define alpha_reg_a( reg ) \ ((reg & AXP_REG_MASK) << AXP_REG1_SHIFT) -#define alpha_reg2( reg ) \ +#define alpha_reg_b( reg ) \ ((reg & AXP_REG_MASK) << AXP_REG2_SHIFT) -#define alpha_reg3( reg ) \ +#define alpha_reg_c( reg ) \ (reg & AXP_REG_MASK) + + +/* encode function codes */ #define alpha_fp_func( func ) \ ((func & AXP_OFF11_MASK) << AXP_REGSIZE) @@ -205,7 +219,6 @@ typedef enum { (func && AXP_OFF16_MASK) - #define alpha_encode_hw4_mem( op, func ) \ (alpha_opcode( op ) | (( func & 0x0f ) << 12)) @@ -228,47 +241,45 @@ typedef enum { #define alpha_encode_mem( ins, op, Rdest, Rsrc, offset ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rdest ) | \ - alpha_reg2( Rsrc ) | (offset & AXP_OFF16_MASK )) + alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ + alpha_reg_b( Rsrc ) | (offset & AXP_OFF16_MASK )) #define alpha_encode_mem_fc( ins, op, func, Rdest, Rsrc, offset ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rdest ) | \ - alpha_reg2( Rsrc ) | alpha_mem_fc_func( func )) + alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ + alpha_reg_b( Rsrc ) | alpha_mem_fc_func( func )) #define alpha_encode_mem_br( ins, op, func, Rdest, Rsrc, hint ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rdest ) | \ - alpha_reg2( Rsrc ) | alpha_mem_br_func( func, hint ) ) + alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ + alpha_reg_b( Rsrc ) | alpha_mem_br_func( func, hint ) ) #define alpha_encode_branch( ins, op, Reg, offset ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Reg ) | \ + alpha_opcode( op ) | alpha_reg_a( Reg ) | \ (offset & AXP_OFF21_MASK )) #define alpha_encode_op( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ - alpha_reg2( Rsrc2 ) | alpha_op_func( func ) | \ - alpha_reg3( Rdest )) + alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ + alpha_reg_b( Rsrc2 ) | alpha_op_func( func ) | \ + 
alpha_reg_c( Rdest )) #define alpha_encode_opl( ins, op, func, Rsrc, lit, Rdest ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ + alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ alpha_op_literal(lit) | ( 1 << 12 ) | \ - alpha_op_func( func ) | alpha_reg3( Rdest ) ) + alpha_op_func( func ) | alpha_reg_c( Rdest ) ) #define alpha_encode_fpop( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg1( Rsrc1 ) | \ - alpha_reg2( Rsrc2 ) | alpha_fp_func( func ) | \ - alpha_reg3( Rdest )) + alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ + alpha_reg_b( Rsrc2 ) | alpha_fp_func( func ) | \ + alpha_reg_c( Rdest )) -#define alpha_reg_zero 31 - /***************************************/ /* pal calls */ @@ -286,14 +297,21 @@ typedef enum { #define alpha_stb( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0e, Rdest, Rsrc, offset ) #define alpha_stq_u( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0f, Rdest, Rsrc, offset ) +#ifdef __VAX__ #define alpha_ldf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x20, Rdest, Rsrc, offset ) #define alpha_ldg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x21, Rdest, Rsrc, offset ) -#define alpha_lds( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x22, Rdest, Rsrc, offset ) -#define alpha_ldt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) #define alpha_stf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x24, Rdest, Rsrc, offset ) #define alpha_stg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x25, Rdest, Rsrc, offset ) +#endif + +#define alpha_lds( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x22, Rdest, Rsrc, offset ) +#define alpha_ldt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) +#define alpha_ldqf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) + #define alpha_sts( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x26, Rdest, Rsrc, offset ) #define alpha_stt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x27, Rdest, Rsrc, offset ) +#define alpha_stqf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x27, Rdest, Rsrc, offset ) + #define alpha_ldl( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x28, Rdest, Rsrc, offset ) #define alpha_ldq( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x29, Rdest, Rsrc, offset ) @@ -306,10 +324,10 @@ typedef enum { /* branch*/ -#define alpha_jmp( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x0, alpha_reg_zero, Rsrc, hint ) -#define alpha_jsr( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x1, alpha_reg_zero, Rsrc, hint ) -#define alpha_ret( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x2, alpha_reg_zero, Rsrc, hint ) -#define alpha_jsrco( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x3, alpha_reg_zero, Rsrc, hint ) +#define alpha_jmp( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x0, Rdest, Rsrc, hint ) +#define alpha_jsr( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x1, Rdest, Rsrc, hint ) +#define alpha_ret( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x2, alpha_zero, Rsrc, hint ) +#define alpha_jsrco( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x3, Rdest, Rsrc, hint ) #define alpha_br( ins, Reg, offset ) alpha_encode_branch( ins, 0x30, Reg, offset ) #define alpha_fbeq( ins, Reg, offset ) alpha_encode_branch( ins, 0x31, Reg, offset ) @@ -396,11 +414,11 @@ typedef enum { #define alpha_cmovlbs_( ins, Rsrc1, lit, Rdest ) 
alpha_encode_opl( ins, 0x11, 0x14, Rsrc1, lit, Rdest ) #define alpha_cmovlbc( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x16, Rsrc1, Rsrc2, Rdest ) #define alpha_cmovlbc_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x16, Rsrc1, lit, Rdest ) -#define alpha_nop( ins ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, alpha_reg_zero, alpha_reg_zero ) -#define alpha_clr( ins, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, alpha_reg_zero, Rdest ) -#define alpha_mov1( ins, Rsrc, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, Rsrc, Rdest ) +#define alpha_nop( ins ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, alpha_zero, alpha_zero ) +#define alpha_clr( ins, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, alpha_zero, Rdest ) +#define alpha_mov1( ins, Rsrc, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, Rsrc, Rdest ) #define alpha_mov2( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) -#define alpha_mov_( ins, lit, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_reg_zero, lit, Rdest ) +#define alpha_mov_( ins, lit, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, lit, Rdest ) //#define alpha_or( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) //#define alpha_or_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x20, Rsrc1, lit, Rdest ) #define alpha_bis( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) @@ -409,8 +427,8 @@ typedef enum { #define alpha_cmoveq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x24, Rsrc1, lit, Rdest ) #define alpha_cmovne( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x26, Rsrc1, Rsrc2, Rdest ) #define alpha_cmovne_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x26, Rsrc1, lit, Rdest ) -#define alpha_not( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, alpha_reg_zero, Rsrc2, Rdest ) -#define alpha_not_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, alpha_reg_zero, lit, Rdest ) +#define alpha_not( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, alpha_zero, Rsrc2, Rdest ) +#define alpha_not_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, alpha_zero, lit, Rdest ) #define alpha_ornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, Rsrc1, Rsrc2, Rdest ) #define alpha_ornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, Rsrc1, lit, Rdest ) #define alpha_xor( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x40, Rsrc1, Rsrc2, Rdest ) @@ -423,8 +441,8 @@ typedef enum { #define alpha_eqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) //#define alpha_xornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x48, Rsrc1, Rsrc2, Rdest ) //#define alpha_xornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) -#define alpha_ev56b_amask( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x61, alpha_reg_zero, Rsrc2, Rdest ) -#define alpha_ev56b_amask_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x61, alpha_reg_zero, lit, Rdest ) +#define alpha_ev56b_amask( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x61, alpha_zero, Rsrc2, Rdest ) +#define alpha_ev56b_amask_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x61, alpha_zero, lit, Rdest ) #define alpha_cmovle( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x64, Rsrc1, Rsrc2, Rdest ) #define alpha_cmovle_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x64, Rsrc1, lit, Rdest ) #define 
alpha_cmovgt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x66, Rsrc1, Rsrc2, Rdest ) @@ -486,4 +504,13 @@ typedef enum { #define alpha_extqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x7a, Rsrc1, Rsrc2, Rdest ) #define alpha_extqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x7a, Rsrc1, lit, Rdest ) -#endif +#define alpha_ftois( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x078, RFsrc, alpha_zero, Rdest ) +#define alpha_ftoit( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) +#define alpha_ftoi_qf( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) + +#define alpha_itofs( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x004, Rsrc, alpha_zero, RFdest ) +#define alpha_itoff( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x014, Rsrc, alpha_zero, RFdest ) +#define alpha_itoft( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) +#define alpha_itof_qf( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) + +#endif \ No newline at end of file diff --git a/alpha/test.c b/alpha/test.c index f5ed45b..27db190 100644 --- a/alpha/test.c +++ b/alpha/test.c @@ -4,13 +4,52 @@ #include #include #include +#include + +/* A typical Alpha stack frame looks like this */ +/* +fun: // called from outside the module. + ldgp gp,0(pv) // load the global pointer +fun..ng: // called from inside the module. + lda sp, -SIZE( sp ) // grow the stack downwards. + + stq ra, 0(sp) // save the return address. + + stq s0, 8(sp) // callee-saved registers. + stq s1, 16(sp) // ... + + // Move the arguments to the argument registers... + + mov addr, pv // Load the callee address + jsr ra, (pv) // call the method. + ldgp gp, 0(ra) // restore gp + + // return value is in v0 + + ldq ra, 0(sp) // free stack frame + ldq s0, 8(sp) // restore callee-saved registers. + ldq s1, 16(sp) + ldq sp, 32(sp) // restore stack pointer + + ret zero, (ra), 1 // return. +*/ + // // Simple function which returns 10. // +int testfunc() +{ + return 10; +} + +// Write it using the known asm bytecodes. 
char * write_testfunc_1( char * p ) { +// +// ldah gp, 0(pv) +// lda gp, 0(gp) //00000001200004d0 : // 1200004d0: f0 ff de 23 lda sp,-16(sp) // 1200004d4: 00 00 5e b7 stq ra,0(sp) @@ -24,65 +63,56 @@ char * write_testfunc_1( char * p ) // 1200004f4: 10 00 de 23 lda sp,16(sp) // 1200004f8: 01 80 fa 6b ret - -// 1200004d0: f0 ff de 23 lda sp,-16(sp) - *p++ = 0xf0; *p++ = 0xff;*p++ = 0xde;*p++ = 0x23; -// 1200004d4: 00 00 5e b7 stq ra,0(sp) - *p++ = 0x00; *p++ = 0x00;*p++ = 0x5e;*p++ = 0xb7; -// 1200004d8: 08 00 fe b5 stq fp,8(sp) - *p++ = 0x08; *p++ = 0x00;*p++ = 0xfe;*p++ = 0xb5; -// 1200004dc: 0f 04 fe 47 mov sp,fp - *p++ = 0x0f; *p++ = 0x04;*p++ = 0xfe;*p++ = 0x47; -// 1200004e0: 0a 00 3f 20 lda t0,10 - *p++ = 0x0a; *p++ = 0x00;*p++ = 0x3f;*p++ = 0x20; -// 1200004e4: 00 04 e1 47 mov t0,v0 - *p++ = 0x00; *p++ = 0x04;*p++ = 0xe1;*p++ = 0x47; -// 1200004e8: 1e 04 ef 47 mov fp,sp - *p++ = 0x1e; *p++ = 0x04;*p++ = 0xef;*p++ = 0x47; -// 1200004ec: 00 00 5e a7 ldq ra,0(sp) - *p++ = 0x00; *p++ = 0x00;*p++ = 0x5e;*p++ = 0xa7; -// 1200004f0: 08 00 fe a5 ldq fp,8(sp) - *p++ = 0x08; *p++ = 0x00;*p++ = 0xfe;*p++ = 0xa5; -// 1200004f4: 10 00 de 23 lda sp,16(sp) - *p++ = 0x10; *p++ = 0x00;*p++ = 0xde;*p++ = 0x23; -// 1200004f8: 01 80 fa 6b ret - *p++ = 0x01; *p++ = 0x80;*p++ = 0xfa;*p++ = 0x6b; - return p; +int _func_code[] = { + 0x23defff0, + 0xb75e0000, + 0xb5fe0008, + 0x47fe040f, + 0x203f000a, + 0x47e10400, + 0x47ef041e, + 0xa75e0000, + 0xa5fe0008, + 0x23de0010, + 0x6bfa8001 }; + + memcpy( p , _func_code, 4 * 11 ); + return p + ( 4 * 11 ); } -#define t0 1 -#define t1 2 -#define a0 16 -#define sp 30 -#define gp 29 -#define t12 27 -#define ra 26 -#define v0 0 - // The same function encoded with alpha-codegen.h -// Still needs work on the fp insns. -// char * write_testfunc_2( char * p ) -{ - alpha_lda( p, sp, sp, -16 ); - alpha_stq( p, ra, sp, 0 ); -// alpha_stq( p, fp, sp, 8 ); -// alpha_mov1( p, sp, fp ); - alpha_lda( p, t0, alpha_reg_zero, 10 ); - alpha_mov1( p, t0, v0 ); -// alpha_mov1( p, sp, fp ); - alpha_ldq( p, ra, sp, 0 ); -// alpha_ldq( p, fp, sp, 8 ); - alpha_lda( p, sp, sp, 16 ); - - alpha_ret( p, ra, 1 ); - - return p; +{ + alpha_ldah( p, alpha_gp, alpha_pv, 0 ); // start the gp load + alpha_lda( p, alpha_sp, alpha_sp, -16 ); // allocate the stack + alpha_lda( p, alpha_gp, alpha_gp, 0 ); // finish the gp load + alpha_stq( p, alpha_ra, alpha_sp, 0 ); // start param save. + alpha_stq( p, alpha_fp, alpha_sp, 8 ); + alpha_mov1( p, alpha_sp, alpha_fp ); + alpha_lda( p, alpha_t0, alpha_zero, 10 ); + alpha_mov1( p, alpha_t0, alpha_v0 ); + alpha_mov1( p, alpha_fp, alpha_sp ); + alpha_ldq( p, alpha_ra, alpha_sp, 0 ); + alpha_ldq( p, alpha_fp, alpha_sp, 8 ); + alpha_lda( p, alpha_sp, alpha_sp, 16 ); + + alpha_ret( p, alpha_ra, 1 ); + + return p; } void output( char * p, int len ) { + char * maxp = p + len; + char * cp = p; + + printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + for ( ; cp < maxp; cp++ ) + { + printf (".byte 0x%0.2x\n", (*cp&0x00ff) ); + } + int fd = open( "bad.out", O_CREAT | O_TRUNC ); write( fd, p, len ); close( fd ); @@ -93,9 +123,9 @@ int main( int argc, char ** argv ) { char *p = code; char * cp; - printf( "%d", sizeof( unsigned int ) ); - - printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + int (*x)() = 0; + int y = 0; + int z = 10; // so, `test blah` gets you the byte-encoded function. // and `test` gets you the alpha-codegen.h encoded function. 
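One practical caveat before the updated main() below jumps straight into the buffer: the Alpha instruction cache is not coherent with data stores, so freshly written code should be flushed with the unprivileged `imb` PAL call (CALL_PAL 0x0086) first. The header's alpha_call_pal() still passes a stray `x` where its `func` argument belongs, so inline asm is the simplest route; a hedged sketch, for native Alpha builds only:

/* sketch: make stores to the code buffer visible to instruction fetch */
static void
flush_icache (void)
{
#ifdef __alpha__
	__asm__ __volatile__ ("imb" : : : "memory");
#endif
}

Calling flush_icache() just before `y = x();` would make the self-test robust on real hardware.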
@@ -109,12 +139,14 @@ int main( int argc, char ** argv ) { p = write_testfunc_2( p ); } - for (cp = code; cp < p; cp++) - { - printf (".byte 0x%0.2x\n", (*cp&0x00ff) ); - } + // output( code, p-code ); - output( code, p-code ); + // call the procedure. + x = (int(*)())code; + + while( z-- > 0 ) + y = x(); return 0; } + diff --git a/alpha/tramp.c b/alpha/tramp.c index 4f01c90..67e8613 100644 --- a/alpha/tramp.c +++ b/alpha/tramp.c @@ -1,4 +1,248 @@ +/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ +/* + * Create trampolines to invoke arbitrary functions. + * + * Copyright (C) Ximian Inc. + * + * Authors: Laramie Leavitt (lar@leavitt.us) + * + * + */ + +/* A typical Alpha stack frame looks like this */ +/* +fun: // called from outside the module. + ldgp gp,0(pv) // load the global pointer +fun..ng: // called from inside the module. + lda sp, -SIZE( sp ) // grow the stack downwards. + + stq ra, 0(sp) // save the return address. + + stq s0, 8(sp) // callee-saved registers. + stq s1, 16(sp) // ... + + // Move the arguments to the argument registers... + + mov addr, pv // Load the callee address + jsr ra, (pv) // call the method. + ldgp gp, 0(ra) // restore gp + + // return value is in v0 + + ldq ra, 0(sp) // free stack frame + ldq s0, 8(sp) // restore callee-saved registers. + ldq s1, 16(sp) + ldq sp, 32(sp) // restore stack pointer + + ret zero, (ra), 1 // return. + +// assuming that the procedure is in a0. +#define emit_prologue( p ) \ + alpha_ldah( p, alpha_gp, alpha_pv, 0 ); \ + alpha_lda( p, alpha_sp, alpha_sp, -32 ); \ + alpha_lda( p, alpha_gp, alpha_gp, 0 ); \ + alpha_stq( p, alpha_ra, alpha_sp, 0 ); \ + alpha_stq( p, alpha_fp, alpha_sp, 8 ); \ + alpha_mov( p, alpha_sp, alpha_fp ) + +#define emit_move_a0_to_pv( p ) \ + alpha_mov( p, alpha_a0, alpha_pv ) + +#define emit_call( p ) \ + alpha_jsr( p, alpha_ra, alpha_pv, 0 ); \ + alpha_ldah( p, alpha_gp, alpha_ra, 0 ); \ + alpha_lda( p, alpha_gp, alpha_gp, 0 ); \ + +#define emit_epilogue( p ) \ + alpha_mov( p, alpha_fp, alpha_sp ); \ + alpha_ldq( p, alpha_ra, alpha_sp, 0 ); \ + alpha_ldq( p, alpha_fp, alpha_sp, 8 ); \ + alpha_lda( p, alpha_sp, alpha_sp, 32 ); \ + alpha_ret( p, alpha_ra ) + +*/ +/*****************************************************/ + +#include "config.h" +#include +#include + +#include "alpha-codegen.h" + +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" +#include "mono/metadata/debug-helpers.h" + +#define AXP_GENERAL_REGS 6 +#define AXP_MIN_STACK_SIZE 32 + +#define PROLOG_INS 6 +#define CALL_INS 3 +#define EPILOG_INS 5 + +/*****************************************************/ + +typedef struct { + guint i_regs; + guint f_regs; + guint stack_size; + guint code_size; +} size_data; + + +static char* +sig_to_name (MonoMethodSignature *sig, const char *prefix) +{ + /* from sparc.c. this should be global */ + + int i; + char *result; + GString *res = g_string_new (""); + + if (prefix) { + g_string_append (res, prefix); + g_string_append_c (res, '_'); + } + + mono_type_get_desc (res, sig->ret, TRUE); + + for (i = 0; i < sig->param_count; ++i) { + g_string_append_c (res, '_'); + mono_type_get_desc (res, sig->params [i], TRUE); + } + result = res->str; + g_string_free (res, FALSE); + return result; +} + + +static void inline +add_general ( size_data *sz, gboolean simple) +{ + // we don't really know yet, so just put something in here. 
+ if ( sz->i_regs >= AXP_GENERAL_REGS) + { + sz->stack_size += 8; + } + + // ...and it probably doesn't matter if our code size is a + // little large... + + sz->code_size += 12; + sz->i_regs ++; +} + +static void +calculate_sizes (MonoMethodSignature *sig, + size_data *sz, + gboolean string_ctor) +{ + guint i, size; + guint32 simpletype, align; + + sz->i_regs = 0; + sz->f_regs = 0; + sz->stack_size = AXP_MIN_STACK_SIZE; + sz->code_size = 4 * (PROLOG_INS + CALL_INS + EPILOG_INS); + + if (sig->hasthis) { + add_general (&gr, sz, TRUE); + } + + for (i = 0; i < sig->param_count; ++i) { + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + add_general (&gr, sz, TRUE); + break; + case MONO_TYPE_VALUETYPE: + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + + /* align stack size to 8 */ + sz->stack_size = (sz->stack_size + 8) & ~8; + sz->local_size = (sz->local_size + 8) & ~8; +} + +/* */ +/* void func (void (*callme)(), void *retval, */ +/* void *this_obj, stackval *arguments); */ +static inline guint8 * +emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz) +{ + guint stack_size; + + stack_size = sz->stack_size; + + /* function prolog */ + alpha_ldah( p, alpha_gp, alpha_pv, 0 ); + alpha_lda( p, alpha_sp, alpha_sp, -stack_size ); + alpha_lda( p, alpha_gp, alpha_gp, 0 ); + + /* save ra, fp */ + alpha_stq( p, alpha_ra, alpha_sp, 0 ); + alpha_stq( p, alpha_fp, alpha_sp, 8 ); + + /* store the return parameter */ + alpha_stq( p, alpha_a0, alpha_sp, 16 ); + alpha_stq( p, alpha_a1, alpha_sp, 24 ); + + /* load fp into sp */ + alpha_mov( p, alpha_sp, alpha_fp ) + + return p; +} + +static inline guint8 * +emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) +{ + alpha_mov( p, alpha_fp, alpha_sp ); + + /* restore fp, ra, sp */ + alpha_ldq( p, alpha_ra, alpha_sp, 0 ); + alpha_ldq( p, alpha_fp, alpha_sp, 8 ); + alpha_lda( p, alpha_sp, alpha_sp, 32 ); + + /* return */ + alpha_ret( p, alpha_ra ); +} + +static inline guint8 * +emit_call( guint8 *p, MonoMethodSignature *sig, size_data *sz ) +{ + /* move a0 into pv, ready to call */ + alpha_mov( p, alpha_a0, alpha_pv ); + + /* call arg */ + alpha_jsr( p, alpha_ra, alpha_pv, 0 ); + + /* reload the gp */ + alpha_ldah( p, alpha_gp, alpha_ra, 0 ); + alpha_lda( p, alpha_gp, alpha_gp, 0 ); +} + MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -- cgit v1.1 From e679a120b848ea9e35e7c8a38ca3e03a386371c7 Mon Sep 17 00:00:00 2001 From: Patrik Torstensson Date: Fri, 14 Feb 2003 10:01:29 +0000 Subject: 2003-02-14 Patrik Torstensson * x86-codegen.h: Added fstsw op code for getting fp flags svn path=/trunk/mono/; revision=11577 --- x86/x86-codegen.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index cea7d89..d816e27 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -6,6 +6,7 @@ * Intel Corporation (ORP Project) * Sergey Chaban (serge@wildwestsoftware.com) * Dietmar Maurer (dietmar@ximian.com) + * Patrik Torstensson * * Copyright (C) 2000 Intel Corporation. All rights reserved. * Copyright (C) 2001, 2002 Ximian, Inc. 
@@ -1231,6 +1232,13 @@ typedef union { } \ } while (0) +#define x86_fstsw(inst) \ + do { \ + *(inst)++ = (unsigned char)0x9b; \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + /** * @x86_fist_membase * Converts content of ST(0) to integer and stores it at memory location -- cgit v1.1 From e1b54daadf68eef0608ac03bd6fe4dc374d78675 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sun, 27 Apr 2003 11:40:11 +0000 Subject: Make the debugging output off by default. svn path=/trunk/mono/; revision=14039 --- sparc/tramp.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sparc/tramp.c b/sparc/tramp.c index e9c6a24..82e6fed 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -31,6 +31,8 @@ #define LOCAL_REGS 8 #define NOT_IMPL(x) g_error("FIXME: %s", x); +/*#define DEBUG(a) a*/ +#define DEBUG(a) /* Some assembly... */ #define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") @@ -149,7 +151,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } size = mono_class_native_size (sig->params[i]->data.klass, NULL); if (size != 4) { - fprintf(stderr, "copy %d byte struct on stack\n", size); + DEBUG(fprintf(stderr, "copy %d byte struct on stack\n", size)); *use_memcpy = TRUE; *code_size += 8*4; *stack_size += (size + 3) & (~3); @@ -319,7 +321,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (!klass->enumtype) { gint size = mono_class_native_size (klass, NULL); - fprintf(stderr, "retval value type size: %d\n", size); + DEBUG(fprintf(stderr, "retval value type size: %d\n", size)); if (size > 8) { sparc_ld_imm (p, sparc_sp, stack_size - 12, sparc_o0); @@ -331,7 +333,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, } } - fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)); + DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, FALSE))); for (i = 0; i < sig->param_count; i++) { if (sig->params[i]->byref) { @@ -558,8 +560,8 @@ mono_create_method_pointer (MonoMethod *method) p = code_buffer = g_malloc (code_size); - fprintf(stderr, "Delegate [start emiting] %s\n", method->name); - fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)); + DEBUG(fprintf(stderr, "Delegate [start emiting] %s\n", method->name)); + DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, FALSE))); p = emit_prolog (p, sig, stack_size); @@ -752,7 +754,7 @@ mono_create_method_pointer (MonoMethod *method) sparc_disassemble_code (code_buffer, p, method->name); - fprintf(stderr, "Delegate [end emiting] %s\n", method->name); + DEBUG(fprintf(stderr, "Delegate [end emiting] %s\n", method->name)); return ji->code_start; } -- cgit v1.1 From 27eb0661916c7c65b43def99be92895c61f4d315 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Sun, 27 Apr 2003 14:47:57 +0000 Subject: * ARM codegen update; svn path=/trunk/mono/; revision=14043 --- arm/arm-codegen.c | 8 +- arm/arm-codegen.h | 417 ++++++++++++++++++++++++++++++++++++++++++++++++------ arm/arm-dis.c | 35 +++-- arm/arm-dis.h | 1 + arm/cmp_macros.th | 65 +++++++-- arm/dpi_macros.th | 76 +++++++++- arm/dpiops.sh | 2 +- arm/mov_macros.th | 92 +++++++++++- 8 files changed, 618 insertions(+), 78 deletions(-) diff --git a/arm/arm-codegen.c b/arm/arm-codegen.c index 88d572a..9914ace 100644 --- a/arm/arm-codegen.c +++ b/arm/arm-codegen.c @@ -156,9 +156,9 @@ arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int if ((shift & 0x80000001) != 1) { if (shift >= 0) { - ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 
31), shift >> 1, cond); + ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift, cond); } else { - ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2) >> 1, cond); + ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2), cond); } } else { mov_op = ARMOP_MOV; @@ -172,12 +172,12 @@ arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int shift = (arm_bsf(imm32) - 1) & (~1); snip = imm32 & (0xFF << shift); - ARM_EMIT(p, ARM_DEF_DPI_IMM_COND(snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond)); + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond)); while ((imm32 ^= snip) != 0) { shift = (arm_bsf(imm32) - 1) & (~1); snip = imm32 & (0xFF << shift); - ARM_EMIT(p, ARM_DEF_DPI_IMM_COND(snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond)); + ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond)); } } diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 3d6c798..61302a4 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -31,7 +31,13 @@ arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32); void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;} # define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i)) #else -# define ARM_EMIT(p, i) *(arminstr_t*)p = (arminstr_t)i; ((arminstr_t*)p)++ +# define ARM_EMIT(p, i) do {*(arminstr_t*)p = (arminstr_t)i; ((arminstr_t*)p)++;} while (0) +#endif + +#if defined(_MSC_VER) && !defined(ARM_NOIASM) +# define ARM_IASM(_expr) __emit (_expr) +#else +# define ARM_IASM(_expr) #endif /* even_scale = rot << 1 */ @@ -98,6 +104,9 @@ typedef enum { ARMREG_CR14, ARMREG_CR15, + /* XScale: acc0 on CP0 */ + ARMREG_ACC0 = ARMREG_CR0, + ARMREG_MAX = ARMREG_R15 } ARMReg; @@ -110,22 +119,22 @@ typedef enum { typedef enum { - ARMCOND_EQ = 0x0, /* Equal */ - ARMCOND_NE = 0x1, /* Not equal, or unordered */ - ARMCOND_CS = 0x2, /* Carry set */ - ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same */ - ARMCOND_CC = 0x3, /* Carry clear */ + ARMCOND_EQ = 0x0, /* Equal; Z = 1 */ + ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */ + ARMCOND_CS = 0x2, /* Carry set; C = 1 */ + ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same; */ + ARMCOND_CC = 0x3, /* Carry clear; C = 0 */ ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */ - ARMCOND_MI = 0x4, /* Negative */ - ARMCOND_PL = 0x5, /* Positive or zero */ - ARMCOND_VS = 0x6, /* Overflow */ - ARMCOND_VC = 0x7, /* No overflow */ - ARMCOND_HI = 0x8, /* Unsigned higher */ - ARMCOND_LS = 0x9, /* Unsigned lower or same */ - ARMCOND_GE = 0xA, /* Signed greater than or equal */ - ARMCOND_LT = 0xB, /* Signed less than */ - ARMCOND_GT = 0xC, /* Signed greater than */ - ARMCOND_LE = 0xD, /* Signed less than or equal */ + ARMCOND_MI = 0x4, /* Negative; N = 1 */ + ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */ + ARMCOND_VS = 0x6, /* Overflow; V = 1 */ + ARMCOND_VC = 0x7, /* No overflow; V = 0 */ + ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */ + ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */ + ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */ + ARMCOND_LT = 0xB, /* Signed less than; N != V */ + ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */ + ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 && N != V */ ARMCOND_AL = 0xE, /* Always */ ARMCOND_NV = 0xF, /* Never */ @@ -185,12 +194,41 @@ typedef enum { ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */ /* 
ARM3M+ */ - ARMOP_UMUL = 0x4, + ARMOP_UMULL = 0x4, ARMOP_UMLAL = 0x5, ARMOP_SMULL = 0x6, - ARMOP_SMLAL = 0x7 + ARMOP_SMLAL = 0x7, + + /* for data transfers with register offset */ + ARM_UP = 1, + ARM_DOWN = 0 } ARMOpcode; +typedef enum { + THUMBOP_AND = 0, + THUMBOP_EOR = 1, + THUMBOP_LSL = 2, + THUMBOP_LSR = 3, + THUMBOP_ASR = 4, + THUMBOP_ADC = 5, + THUMBOP_SBC = 6, + THUMBOP_ROR = 7, + THUMBOP_TST = 8, + THUMBOP_NEG = 9, + THUMBOP_CMP = 10, + THUMBOP_CMN = 11, + THUMBOP_ORR = 12, + THUMBOP_MUL = 13, + THUMBOP_BIC = 14, + THUMBOP_MVN = 15, + THUMBOP_MOV = 16, + THUMBOP_CMPI = 17, + THUMBOP_ADD = 18, + THUMBOP_SUB = 19, + THUMBOP_CMPH = 19, + THUMBOP_MOVH = 20 +} ThumbOpcode; + /* Generic form - all ARM instructions are conditional. */ typedef struct { @@ -295,12 +333,18 @@ typedef union { #define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL) - +/* codegen */ #define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond)) #define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond)) +/* inline */ +#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ + ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond)) +#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \ + ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond)) + #define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \ @@ -314,6 +358,7 @@ typedef union { (ARM_DPI_TAG) | \ ARM_DEF_COND(cond) +/* codegen */ #define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond)) @@ -326,7 +371,46 @@ typedef union { #define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond)) +/* inline */ +#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \ + ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond)) +#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \ + ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond)) + +#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \ + ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond)) + +#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \ + ARM_IASM_EMIT(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond)) + + +/* Rd := Rn op (Rm shift_type Rs) */ +#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \ + (rm) | \ + (1 << 4) | \ + ((shift_type & 3) << 5) | \ + ((rs) << 8) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((s) << 20) | \ + ((op) << 21) | \ + (ARM_DPI_TAG) | \ + ARM_DEF_COND(cond) + +/* codegen */ +#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond)) + +#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \ + ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond)) + +/* inline */ +#define 
ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \ + ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond)) + +#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \ + ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond)) @@ -348,27 +432,47 @@ typedef struct { #define ARM_MRT_TAG ARM_MRT_ID << 25 #define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \ - (regs) | \ - (rn << 16) | \ - (l << 20) | \ - (w << 21) | \ - (s << 22) | \ - (u << 23) | \ - (p << 24) | \ - (ARM_MRT_TAG) | \ - ARM_DEF_COND(cond) + (regs) | \ + (rn << 16) | \ + (l << 20) | \ + (w << 21) | \ + (s << 22) | \ + (u << 23) | \ + (p << 24) | \ + (ARM_MRT_TAG) | \ + ARM_DEF_COND(cond) /* stmdb sp!, {regs} */ #define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL)) +#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL)) /* ldmia sp!, {regs} */ #define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL)) +#define ARM_IASM_POP(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL)) /* ldmia sp, {regs} ; (no write-back) */ #define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL)) - +#define ARM_IASM_POP_NWB(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL)) + +#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1)) +#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2)) +#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3)) +#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4)) +#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5)) +#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6)) +#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7)) +#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8)) + +#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8)) +#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7)) +#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6)) +#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5)) +#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4)) +#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3)) +#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2)) +#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1)) /* Multiply instructions */ @@ -389,6 +493,60 @@ typedef struct { #define ARM_MUL_MASK ((0xF << 24) | (0xF << 4)) #define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4)) +#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \ + (rm) | \ + ((rs) << 8) | \ + ((rn) << 12) | \ + ((rd) << 16) | \ + ((s & 1) << 17) | \ + ((op & 7) << 18) | \ + ARM_MUL_TAG | \ + ARM_DEF_COND(cond) + +/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */ +#define 
ARM_MUL_COND(p, rd, rm, rs, cond) \ + ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond)) +#define ARM_MUL(p, rd, rm, rs) \ + ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_MULS_COND(p, rd, rm, rs, cond) \ + ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond)) +#define ARM_MULS(p, rd, rm, rs) \ + ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs) +#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs) + +/* inline */ +#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \ + ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond)) +#define ARM_IASM_MUL(rd, rm, rs) \ + ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL) +#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \ + ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond)) +#define ARM_IASM_MULS(rd, rm, rs) \ + ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL) + + +/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */ +#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \ + ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond)) +#define ARM_MLA(p, rd, rm, rs, rn) \ + ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL) +#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \ + ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond)) +#define ARM_MLAS(p, rd, rm, rs, rn) \ + ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL) + +/* inline */ +#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \ + ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond)) +#define ARM_IASM_MLA(rd, rm, rs, rn) \ + ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL) +#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \ + ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond)) +#define ARM_IASM_MLAS(rd, rm, rs, rn) \ + ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL) + + /* Word/byte transfer */ typedef union { @@ -400,7 +558,7 @@ typedef union { arminstr_t ls : 1; arminstr_t wb : 1; arminstr_t b : 1; - arminstr_t u : 1; + arminstr_t u : 1; /* down(0) / up(1) */ arminstr_t p : 1; /* post-index(0) / pre-index(1) */ arminstr_t type : 1; /* imm(0) / register(1) */ arminstr_t tag : 2; /* 0 1 */ @@ -427,7 +585,7 @@ typedef union { #define ARM_WXFER_MAX_OFFS 0xFFF -/* this macro checks imm12 bounds */ +/* this macro checks for imm12 bounds */ #define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \ do { \ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \ @@ -487,9 +645,13 @@ typedef union { #define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL) +/* write-back */ +#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \ + ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond) +#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL) -#define ARM_DEF_WXFER_REG_REG(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \ +#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \ (rm) | \ ((shift_type) << 5) | \ ((shift) << 7) | \ @@ -498,21 +660,49 @@ typedef union { ((ls) << 20) | \ ((wb) << 21) | \ ((b) << 22) | \ - ((p) << 24) | \ - (1 << 25) | \ + ((u) << 23) | \ + ((p) << 24) | \ + (1 << 25) | \ ARM_WXFER_TAG | \ ARM_DEF_COND(cond) +#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \ + ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond) +#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \ + ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, 
ARM_DOWN, p, cond) + #define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \ - ARM_EMIT(p, ARM_DEF_WXFER_REG_REG(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond)) + ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond)) #define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL) #define ARM_LDR_REG_REG(p, rd, rn, rm) \ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0) - - +#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond)) +#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \ + ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL) +#define ARM_LDRB_REG_REG(p, rd, rn, rm) \ + ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0) + +#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond)) +#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \ + ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL) +#define ARM_STR_REG_REG(p, rd, rn, rm) \ + ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0) + +/* zero-extend */ +#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \ + ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond)) +#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \ + ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL) +#define ARM_STRB_REG_REG(p, rd, rn, rm) \ + ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0) + + +/* ARMv4+ */ /* Half-word or byte (signed) transfer. 
*/ typedef struct { arminstr_t rm : 4; /* imm_lo */ @@ -573,6 +763,43 @@ typedef struct { ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL) +#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \ + ((rm) & 0xF) | \ + ((h) << 5) | \ + ((s) << 6) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((ls) << 20) | \ + ((wb) << 21) | \ + (0 << 22) | \ + ((u) << 23) | \ + ((p) << 24) | \ + ARM_HXFER_TAG | \ + ARM_DEF_COND(cond) + +#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \ + ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond) +#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \ + ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond) + +#define ARM_LDRH_REG_REG_COND(p, rm, rd, rn, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRH_REG_REG(p, rm, rd, rn) \ + ARM_LDRH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) +#define ARM_LDRSH_REG_REG_COND(p, rm, rd, rn, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRSH_REG_REG(p, rm, rd, rn) \ + ARM_LDRSH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) +#define ARM_LDRSB_REG_REG_COND(p, rm, rd, rn, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) +#define ARM_LDRSB_REG_REG(p, rm, rd, rn) ARM_LDRSB_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) + +#define ARM_STRH_REG_REG_COND(p, rm, rd, rn, cond) \ + ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond)) +#define ARM_STRH_REG_REG(p, rm, rd, rn) \ + ARM_STRH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) + + /* Swap */ typedef struct { @@ -713,6 +940,114 @@ typedef struct { + +#include "arm_dpimacros.h" + +#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0) + + +#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond) +#define ARM_SHL_IMM(p, rd, rm, imm) \ + ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL) +#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond) +#define ARM_SHLS_IMM(p, rd, rm, imm) \ + ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL) + +#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond) +#define ARM_SHR_IMM(p, rd, rm, imm) \ + ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL) +#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond) +#define ARM_SHRS_IMM(p, rd, rm, imm) \ + ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL) + +#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond) +#define ARM_SAR_IMM(p, rd, rm, imm) \ + ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL) +#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond) +#define ARM_SARS_IMM(p, rd, rm, imm) \ + ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL) + +#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond) +#define ARM_ROR_IMM(p, rd, rm, imm) \ + ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL) +#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \ + ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond) +#define ARM_RORS_IMM(p, rd, rm, imm) \ + ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL) + +#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \ + 
ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond) +#define ARM_SHL_REG(p, rd, rm, rs) \ + ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond) +#define ARM_SHLS_REG(p, rd, rm, rs) \ + ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs) + +#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond) +#define ARM_SHR_REG(p, rd, rm, rs) \ + ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond) +#define ARM_SHRS_REG(p, rd, rm, rs) \ + ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs) + +#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond) +#define ARM_SAR_REG(p, rd, rm, rs) \ + ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond) +#define ARM_SARS_REG(p, rd, rm, rs) \ + ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs) + +#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond) +#define ARM_ROR_REG(p, rd, rm, rs) \ + ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \ + ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond) +#define ARM_RORS_REG(p, rd, rm, rs) \ + ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL) +#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs) + +#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010) +#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010) + +#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1) +#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1) + + +/* ARM V5 */ + +/* Count leading zeros, CLZ{cond} Rd, Rm */ +typedef struct { + arminstr_t rm : 4; + arminstr_t tag2 : 8; + arminstr_t rd : 4; + arminstr_t tag : 12; + arminstr_t cond : 4; +} ARMInstrCLZ; + +#define ARM_CLZ_ID 0x16F +#define ARM_CLZ_ID2 0xF1 +#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF < 4)) +#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4)) + + + + typedef union { ARMInstrBR br; ARMInstrDPI dpi; @@ -727,22 +1062,16 @@ typedef union { ARMInstrSWI swi; ARMInstrMSR msr; ARMInstrMRS mrs; + ARMInstrCLZ clz; ARMInstrGeneric generic; arminstr_t raw; } ARMInstr; -#include "arm_dpimacros.h" - -#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0) - - #ifdef __cplusplus } #endif #endif /* ARM_H */ - - diff --git a/arm/arm-dis.c b/arm/arm-dis.c index caec7e5..0a478bc 100644 --- a/arm/arm-dis.c +++ b/arm/arm-dis.c @@ -26,7 +26,7 @@ const static char* ops[] = { const static char* shift_types[] = {"lsl", "lsr", "asr", "ror"}; const static char* mul_ops[] = { - "mul", "mla", "?", "?", "umul", "umlal", "smull", "smlal" + "mul", "mla", "?", "?", "umull", "umlal", "smull", "smlal" }; const static char* reg_alias[] = { @@ -58,6 +58,7 @@ void dump_mul(ARMDis* dis, ARMInstr i); void dump_swi(ARMDis* dis, ARMInstr i); void dump_swp(ARMDis* dis, ARMInstr i); void dump_wxfer(ARMDis* dis, ARMInstr i); +void dump_clz(ARMDis* dis, ARMInstr i); /* @@ -92,9 +93,9 @@ FILE* armdis_get_output(ARMDis* dis) { void dump_reg(ARMDis* dis, int reg) { reg &= 0xF; if (!use_reg_alias || (reg > 3 && 
reg < 11)) { - fprintf(dis->dis_out, "r%d", reg); + fprintf(dis->dis_out, "r%d", reg); } else { - fprintf(dis->dis_out, reg_alias[reg]); + fprintf(dis->dis_out, reg_alias[reg]); } } @@ -136,28 +137,29 @@ void dump_reglist(ARMDis* dis, int reg_list) { void dump_br(ARMDis* dis, ARMInstr i) { - fprintf(dis->dis_out, "b%s%s\t%x", + fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %p", (i.br.link == 1) ? "l" : "", - cond[i.br.cond], i.br.offset); + cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6)); } void dump_dpi(ARMDis* dis, ARMInstr i) { fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]); - if ((i.dpi.all.opcode >= ARMOP_TST) && (i.dpi.all.opcode <= ARMOP_CMN) && (i.dpi.all.s != 0)) { + if ((i.dpi.all.opcode < ARMOP_TST || i.dpi.all.opcode > ARMOP_CMN) && (i.dpi.all.s != 0)) { fprintf(dis->dis_out, "s"); } fprintf(dis->dis_out, "\t"); if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) { - /* comparison operation */ + /* for comparison operations Rd is ignored */ dump_reg(dis, i.dpi.all.rd); fprintf(dis->dis_out, ", "); } if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) { + /* for MOV/MVN Rn is ignored */ dump_reg(dis, i.dpi.all.rn); fprintf(dis->dis_out, ", "); } @@ -189,10 +191,11 @@ void dump_dpi(ARMDis* dis, ARMInstr i) { } void dump_wxfer(ARMDis* dis, ARMInstr i) { - fprintf(dis->dis_out, "%s%s%s\t", + fprintf(dis->dis_out, "%s%s%s%s\t", (i.wxfer.all.ls == 0) ? "str" : "ldr", cond[i.generic.cond], - (i.wxfer.all.b == 0) ? "" : "b"); + (i.wxfer.all.b == 0) ? "" : "b", + (i.wxfer.all.ls != 0 && i.wxfer.all.wb != 0) ? "t" : ""); dump_reg(dis, i.wxfer.all.rd); fprintf(dis->dis_out, ", ["); dump_reg(dis, i.wxfer.all.rn); @@ -277,7 +280,7 @@ void dump_mul(ARMDis* dis, ARMInstr i) { fprintf(dis->dis_out, ", "); dump_reg(dis, i.mul.rn); break; - case ARMOP_UMUL: + case ARMOP_UMULL: case ARMOP_UMLAL: case ARMOP_SMULL: case ARMOP_SMLAL: @@ -372,6 +375,15 @@ void dump_swi(ARMDis* dis, ARMInstr i) { } +void dump_clz(ARMDis* dis, ARMInstr i) { + fprintf(dis->dis_out, "clz%s\t"); + dump_reg(dis, i.clz.rd); + fprintf(dis->dis_out, ", "); + dump_reg(dis, i.clz.rm); + fprintf(dis->dis_out, "\n"); +} + + void armdis_decode(ARMDis* dis, void* p, int size) { int i; @@ -386,6 +398,7 @@ void armdis_decode(ARMDis* dis, void* p, int size) { for (i=0; idis_out, "%p:\t%08x\t", pi, *pi); + dis->pi = pi; instr.raw = *pi++; if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) { @@ -394,6 +407,8 @@ void armdis_decode(ARMDis* dis, void* p, int size) { dump_swp(dis, instr); } else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) { dump_mul(dis, instr); + } else if ((instr.raw & ARM_CLZ_MASK) == ARM_CLZ_TAG) { + dump_clz(dis, instr); } else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) { dump_wxfer(dis, instr); } else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) { diff --git a/arm/arm-dis.h b/arm/arm-dis.h index b93db30..8019499 100644 --- a/arm/arm-dis.h +++ b/arm/arm-dis.h @@ -14,6 +14,7 @@ extern "C" { typedef struct _ARMDis { FILE* dis_out; + void* pi; } ARMDis; diff --git a/arm/cmp_macros.th b/arm/cmp_macros.th index 8a35708..cb2639d 100644 --- a/arm/cmp_macros.th +++ b/arm/cmp_macros.th @@ -1,11 +1,56 @@ -/* PSR = rd, (imm8 ROR 2*rot) */ -#define ARM__REG_IMM_COND(p, rd, imm8, rot, cond) \ - ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_, rd, 0, imm8, rot, cond) -#define ARM__REG_IMM(p, rd, imm8, rot) \ - ARM__REG_IMM_COND(p, rd, imm8, rot, ARMCOND_AL) -/* PSR = rd, imm8 */ -#define ARM__REG_IMM8_COND(p, rd, imm8, cond) \ - 
ARM__REG_IMM_COND(p, rd, imm8, 0, cond) -#define ARM__REG_IMM8(p, rd, imm8) \ - ARM__REG_IMM8_COND(p, rd, imm8, ARMCOND_AL) +/* PSR := Rn, (imm8 ROR 2*rot) */ +#define ARM__REG_IMM_COND(p, rn, imm8, rot, cond) \ + ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_, 0, rn, imm8, rot, cond) +#define ARM__REG_IMM(p, rn, imm8, rot) \ + ARM__REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_IMM_COND(rn, imm8, rot, cond) \ + ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_, 0, rn, imm8, rot, cond) +#define __REG_IMM(rn, imm8, rot) \ + __REG_IMM_COND(rn, imm8, rot, ARMCOND_AL) +#endif + + +/* PSR := Rn, imm8 */ +#define ARM__REG_IMM8_COND(p, rn, imm8, cond) \ + ARM__REG_IMM_COND(p, rn, imm8, 0, cond) +#define ARM__REG_IMM8(p, rn, imm8) \ + ARM__REG_IMM8_COND(p, rn, imm8, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_IMM8_COND(rn, imm8, cond) \ + __REG_IMM_COND(rn, imm8, 0, cond) +#define __REG_IMM8(rn, imm8) \ + __REG_IMM8_COND(rn, imm8, ARMCOND_AL) +#endif + + +/* PSR := Rn, Rm */ +#define ARM__REG_REG_COND(p, rn, rm, cond) \ + ARM_DPIOP_S_REG_REG_COND(p, ARMOP_, 0, rn, rm, cond) +#define ARM__REG_REG(p, rn, rm) \ + ARM__REG_REG_COND(p, rn, rm, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_REG_COND(rn, rm, cond) \ + ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_, 0, rn, rm, cond) +#define __REG_REG(rn, rm) \ + __REG_REG_COND(rn, rm, ARMCOND_AL) +#endif + + +/* PSR := Rn, (Rm imm8) */ +#define ARM__REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \ + ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_, 0, rn, rm, shift_type, imm_shift, cond) +#define ARM__REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \ + ARM__REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \ + ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_, 0, rn, rm, shift_type, imm_shift, cond) +#define __REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \ + __REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL) +#endif + diff --git a/arm/dpi_macros.th b/arm/dpi_macros.th index f8ec608..be43d1f 100644 --- a/arm/dpi_macros.th +++ b/arm/dpi_macros.th @@ -1,6 +1,6 @@ /* -- -- */ -/* rd = rn (imm8 ROR rot) ; rot is power of 2 */ +/* Rd := Rn (imm8 ROR rot) ; rot is power of 2 */ #define ARM__REG_IMM_COND(p, rd, rn, imm8, rot, cond) \ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_, rd, rn, imm8, rot, cond) #define ARM__REG_IMM(p, rd, rn, imm8, rot) \ @@ -10,7 +10,19 @@ #define ARM_S_REG_IMM(p, rd, rn, imm8, rot) \ ARM_S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL) -/* rd = rn imm8 */ +#ifndef ARM_NOIASM +#define __REG_IMM_COND(rd, rn, imm8, rot, cond) \ + ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_, rd, rn, imm8, rot, cond) +#define __REG_IMM(rd, rn, imm8, rot) \ + __REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL) +#define _S_REG_IMM_COND(rd, rn, imm8, rot, cond) \ + ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_, rd, rn, imm8, rot, cond) +#define _S_REG_IMM(rd, rn, imm8, rot) \ + _S_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL) +#endif + + +/* Rd := Rn imm8 */ #define ARM__REG_IMM8_COND(p, rd, rn, imm8, cond) \ ARM__REG_IMM_COND(p, rd, rn, imm8, 0, cond) #define ARM__REG_IMM8(p, rd, rn, imm8) \ @@ -20,8 +32,19 @@ #define ARM_S_REG_IMM8(p, rd, rn, imm8) \ ARM_S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL) +#ifndef ARM_NOIASM +#define __REG_IMM8_COND(rd, rn, imm8, cond) \ + __REG_IMM_COND(rd, rn, imm8, 0, cond) +#define __REG_IMM8(rd, rn, imm8) \ + __REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL) +#define _S_REG_IMM8_COND(rd, rn, imm8, cond) \ + _S_REG_IMM_COND(rd, rn, imm8, 0, 
cond) +#define _S_REG_IMM8(rd, rn, imm8) \ + _S_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL) +#endif + -/* rd = rn rm */ +/* Rd := Rn Rm */ #define ARM__REG_REG_COND(p, rd, rn, rm, cond) \ ARM_DPIOP_REG_REG_COND(p, ARMOP_, rd, rn, rm, cond) #define ARM__REG_REG(p, rd, rn, rm) \ @@ -31,7 +54,19 @@ #define ARM_S_REG_REG(p, rd, rn, rm) \ ARM_S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL) -/* rd = rn (rm imm_shift) */ +#ifndef ARM_NOIASM +#define __REG_REG_COND(rd, rn, rm, cond) \ + ARM_IASM_DPIOP_REG_REG_COND(ARMOP_, rd, rn, rm, cond) +#define __REG_REG(rd, rn, rm) \ + __REG_REG_COND(rd, rn, rm, ARMCOND_AL) +#define _S_REG_REG_COND(rd, rn, rm, cond) \ + ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_, rd, rn, rm, cond) +#define _S_REG_REG(rd, rn, rm) \ + _S_REG_REG_COND(rd, rn, rm, ARMCOND_AL) +#endif + + +/* Rd := Rn (Rm imm_shift) */ #define ARM__REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_, rd, rn, rm, shift_type, imm_shift, cond) #define ARM__REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \ @@ -41,4 +76,37 @@ #define ARM_S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \ ARM_S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL) +#ifndef ARM_NOIASM +#define __REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \ + ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_, rd, rn, rm, shift_type, imm_shift, cond) +#define __REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \ + __REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL) +#define _S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \ + ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_, rd, rn, rm, shift_type, imm_shift, cond) +#define _S_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \ + _S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL) +#endif + + +/* Rd := Rn (Rm Rs) */ +#define ARM__REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \ + ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_, rd, rn, rm, shift_t, rs, cond) +#define ARM__REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \ + ARM__REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL) +#define ARM_S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \ + ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_, rd, rn, rm, shift_t, rs, cond) +#define ARM_S_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \ + ARM_S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \ + ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_, rd, rn, rm, shift_t, rs, cond) +#define __REG_REGSHIFT(rd, rn, rm, shift_type, rs) \ + __REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL) +#define _S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \ + ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_, rd, rn, rm, shift_t, rs, cond) +#define _S_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \ + _S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL) +#endif + diff --git a/arm/dpiops.sh b/arm/dpiops.sh index 2eb43d9..ad394ae 100755 --- a/arm/dpiops.sh +++ b/arm/dpiops.sh @@ -28,5 +28,5 @@ echo -e "\n\n" >> $OUTFILE echo -e "\n/* DPIs, comparison */\n" >> $OUTFILE gen "$CMP_OPCODES" cmp_macros -echo -e "/* end generated */\n\n" >> $OUTFILE +echo -e "\n/* end generated */\n" >> $OUTFILE diff --git a/arm/mov_macros.th b/arm/mov_macros.th index 151a29b..6bac290 100644 --- a/arm/mov_macros.th +++ b/arm/mov_macros.th @@ -1,18 +1,52 @@ -/* rd = imm8 ROR rot */ +/* Rd := imm8 ROR rot */ #define ARM__REG_IMM_COND(p, reg, imm8, rot, cond) \ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_, 
reg, 0, imm8, rot, cond) #define ARM__REG_IMM(p, reg, imm8, rot) \ ARM__REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL) -#define ARM__REG_IMM8(p, reg, imm8) \ - ARM__REG_IMM(p, reg, imm8, 0) /* S */ #define ARM_S_REG_IMM_COND(p, reg, imm8, rot, cond) \ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_, reg, 0, imm8, rot, cond) #define ARM_S_REG_IMM(p, reg, imm8, rot) \ ARM_S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL) +#ifndef ARM_NOIASM +#define __REG_IMM_COND(reg, imm8, rot, cond) \ + ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_, reg, 0, imm8, rot, cond) +#define __REG_IMM(reg, imm8, rot) \ + __REG_IMM_COND(reg, imm8, rot, ARMCOND_AL) +/* S */ +#define _S_REG_IMM_COND(reg, imm8, rot, cond) \ + ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_, reg, 0, imm8, rot, cond) +#define _S_REG_IMM(reg, imm8, rot) \ + _S_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL) +#endif + + +/* Rd := imm8 */ +#define ARM__REG_IMM8_COND(p, reg, imm8, cond) \ + ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_, reg, 0, imm8, 0, cond) +#define ARM__REG_IMM8(p, reg, imm8) \ + ARM__REG_IMM8_COND(p, reg, imm8, ARMCOND_AL) +/* S */ +#define ARM_S_REG_IMM8_COND(p, reg, imm8, cond) \ + ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_, reg, 0, imm8, 0, cond) +#define ARM_S_REG_IMM8(p, reg, imm8) \ + ARM_S_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_IMM8_COND(reg, imm8, cond) \ + ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_, reg, 0, imm8, 0, cond) +#define __REG_IMM8(reg, imm8) \ + __REG_IMM8_COND(reg, imm8, ARMCOND_AL) +/* S */ +#define _S_REG_IMM8_COND(reg, imm8, cond) \ + ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_, reg, 0, imm8, 0, cond) +#define _S_REG_IMM8(reg, imm8) \ + _S_REG_IMM8_COND(reg, imm8, ARMCOND_AL) +#endif + -/* rd = rm */ +/* Rd := Rm */ #define ARM__REG_REG_COND(p, rd, rm, cond) \ ARM_DPIOP_REG_REG_COND(p, ARMOP_, rd, 0, rm, cond) #define ARM__REG_REG(p, rd, rm) \ @@ -23,9 +57,20 @@ #define ARM_S_REG_REG(p, rd, rm) \ ARM_S_REG_REG_COND(p, rd, rm, ARMCOND_AL) +#ifndef ARM_NOIASM +#define __REG_REG_COND(rd, rm, cond) \ + ARM_IASM_DPIOP_REG_REG_COND(ARMOP_, rd, 0, rm, cond) +#define __REG_REG(rd, rm) \ + __REG_REG_COND(rd, rm, ARMCOND_AL) +/* S */ +#define _S_REG_REG_COND(rd, rm, cond) \ + ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_, rd, 0, rm, cond) +#define _S_REG_REG(rd, rm) \ + _S_REG_REG_COND(rd, rm, ARMCOND_AL) +#endif -/* rd = rm imm_shift */ +/* Rd := Rm imm_shift */ #define ARM__REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_, rd, 0, rm, shift_type, imm_shift, cond) #define ARM__REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \ @@ -36,4 +81,41 @@ #define ARM_S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \ ARM_S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL) +#ifndef ARM_NOIASM +#define __REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \ + ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_, rd, 0, rm, shift_type, imm_shift, cond) +#define __REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \ + __REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL) +/* S */ +#define _S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \ + ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_, rd, 0, rm, shift_type, imm_shift, cond) +#define _S_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \ + _S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL) +#endif + + + +/* Rd := (Rm Rs) */ +#define ARM__REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \ + ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_, rd, 0, rm, shift_type, rs, cond) +#define ARM__REG_REGSHIFT(p, rd, rm, shift_type, rs) \ + 
ARM__REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL) +/* S */ +#define ARM_S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \ + ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_, rd, 0, rm, shift_type, rs, cond) +#define ARM_S_REG_REGSHIFT(p, rd, rm, shift_type, rs) \ + ARM_S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL) + +#ifndef ARM_NOIASM +#define __REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \ + ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_, rd, 0, rm, shift_type, rs, cond) +#define __REG_REGSHIFT(rd, rm, shift_type, rs) \ + __REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL) +/* S */ +#define _S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \ + ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_, rd, 0, rm, shift_type, rs, cond) +#define _S_REG_REGSHIFT(rd, rm, shift_type, rs) \ + _S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL) +#endif + -- cgit v1.1 From dfe276d1e1d116b113a639eecbc14c3661af5462 Mon Sep 17 00:00:00 2001 From: Sergey Chaban Date: Sun, 27 Apr 2003 14:50:16 +0000 Subject: arm-WMMX.h: initial WirelessMMX support for ARM codegen; svn path=/trunk/mono/; revision=14044 --- arm/arm-wmmx.h | 177 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100755 arm/arm-wmmx.h diff --git a/arm/arm-wmmx.h b/arm/arm-wmmx.h new file mode 100755 index 0000000..427c4fc --- /dev/null +++ b/arm/arm-wmmx.h @@ -0,0 +1,177 @@ +/* + * ARM CodeGen + * XScale WirelessMMX extensions + * Copyright 2002 Wild West Software + */ + +#ifndef __WMMX_H__ +#define __WMMX_H__ 1 + +#if 0 +#include +#endif + +#if defined(ARM_IASM) +# define WM_ASM(_expr) ARM_IASM(_expr) +#else +# define WM_ASM(_expr) __emit (_expr) +#endif + +#if defined(ARM_EMIT) +# define WM_EMIT(p, i) ARM_EMIT(p, i) +#else +# define WM_EMIT(p, i) +#endif + +enum { + WM_CC_EQ = 0x0, + WM_CC_NE = 0x1, + WM_CC_CS = 0x2, + WM_CC_HS = WM_CC_CS, + WM_CC_CC = 0x3, + WM_CC_LO = WM_CC_CC, + WM_CC_MI = 0x4, + WM_CC_PL = 0x5, + WM_CC_VS = 0x6, + WM_CC_VC = 0x7, + WM_CC_HI = 0x8, + WM_CC_LS = 0x9, + WM_CC_GE = 0xA, + WM_CC_LT = 0xB, + WM_CC_GT = 0xC, + WM_CC_LE = 0xD, + WM_CC_AL = 0xE, + WM_CC_NV = 0xF, + WM_CC_SHIFT = 28 +}; + +#if defined(ARM_DEF_COND) +# define WM_DEF_CC(_cc) ARM_DEF_COND(_cc) +#else +# define WM_DEF_CC(_cc) ((_cc & 0xF) << WM_CC_SHIFT) +#endif + + +enum { + WM_R0 = 0x0, + WM_R1 = 0x1, + WM_R2 = 0x2, + WM_R3 = 0x3, + WM_R4 = 0x4, + WM_R5 = 0x5, + WM_R6 = 0x6, + WM_R7 = 0x7, + WM_R8 = 0x8, + WM_R9 = 0x9, + WM_R10 = 0xA, + WM_R11 = 0xB, + WM_R12 = 0xC, + WM_R13 = 0xD, + WM_R14 = 0xE, + WM_R15 = 0xF, + + WM_wR0 = 0x0, + WM_wR1 = 0x1, + WM_wR2 = 0x2, + WM_wR3 = 0x3, + WM_wR4 = 0x4, + WM_wR5 = 0x5, + WM_wR6 = 0x6, + WM_wR7 = 0x7, + WM_wR8 = 0x8, + WM_wR9 = 0x9, + WM_wR10 = 0xA, + WM_wR11 = 0xB, + WM_wR12 = 0xC, + WM_wR13 = 0xD, + WM_wR14 = 0xE, + WM_wR15 = 0xF +}; + + +/* + * Qualifiers: + * H - 16-bit (HalfWord) SIMD + * W - 32-bit (Word) SIMD + * D - 64-bit (Double) + */ +enum { + WM_B = 0, + WM_H = 1, + WM_D = 2 +}; + +/* + * B.2.3 Transfers From Coprocessor Register (MRC) + * Table B-5 + */ +enum { + WM_TMRC_OP2 = 0, + WM_TMRC_CPNUM = 1, + + WM_TMOVMSK_OP2 = 1, + WM_TMOVMSK_CPNUM = 0, + + WM_TANDC_OP2 = 1, + WM_TANDC_CPNUM = 1, + + WM_TORC_OP2 = 2, + WM_TORC_CPNUM = 1, + + WM_TEXTRC_OP2 = 3, + WM_TEXTRC_CPNUM = 1, + + WM_TEXTRM_OP2 = 3, + WM_TEXTRM_CPNUM = 0 +}; + + +/* + * TANDC{Cond} R15 + * Performs AND across the fields of the SIMD PSR register (wCASF) and sends the result + * to CPSR; can be performed after a Byte, Half-word or Word operation that sets the 
flags. + * NOTE: R15 is omitted from the macro declaration; + */ +#define DEF_WM_TNADC_CC(_q, _cc) WM_DEF_CC((_cc)) + ((_q) << 0x16) + 0xE13F130 + +#define _WM_TNADC_CC(_q, _cc) WM_ASM(DEF_WM_TNADC_CC(_q, _cc)) +#define ARM_WM_TNADC_CC(_p, _q, _cc) WM_EMIT(_p, DEF_WM_TNADC_CC(_q, _cc)) + +/* inline assembly */ +#define _WM_TNADC(_q) _WM_TNADC_CC((_q), WM_CC_AL) +#define _WM_TNADCB() _WM_TNADC(WM_B) +#define _WM_TNADCH() _WM_TNADC(WM_H) +#define _WM_TNADCD() _WM_TNADC(WM_D) + +/* codegen */ +#define ARM_WM_TNADC(_p, _q) ARM_WM_TNADC_CC((_p), (_q), WM_CC_AL) +#define ARM_WM_TNADCB(_p) ARM_WM_TNADC(_p, WM_B) +#define ARM_WM_TNADCH(_p) ARM_WM_TNADC(_p, WM_H) +#define ARM_WM_TNADCD(_p) ARM_WM_TNADC(_p, WM_D) + + +/* + * TBCST{Cond} wRd, Rn + * Broadcasts a value from the ARM Source reg (Rn) to every SIMD position + * in the WMMX Destination reg (wRd). + */ +#define DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn) \ + WM_DEF_CC((_cc)) + ((_q) << 6) + ((_wrd) << 16) + ((_rn) << 12) + 0xE200010 + +#define _WM_TBCST_CC(_q, _cc, _wrd, _rn) WM_ASM(DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn)) +#define ARM_WM_TBCST_CC(_p, _q, _cc, _wrd, _rn) WM_EMIT(_p, DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn)) + +/* inline */ +#define _WM_TBCST(_q, _wrd, _rn) _WM_TBCST_CC(_q, WM_CC_AL, _wrd, _rn) +#define _WM_TBCSTB(_wrd, _rn) _WM_TBCST(WM_B) +#define _WM_TBCSTH(_wrd, _rn) _WM_TBCST(WM_H) +#define _WM_TBCSTD(_wrd, _rn) _WM_TBCST(WM_D) + +/* codegen */ +#define ARM_WM_TBCST(_p, _q, _wrd, _rn) ARM_WM_TBCST_CC(_p, _q, WM_CC_AL, _wrd, _rn) +#define ARM_WM_TBCSTB(_p, _wrd, _rn) _WM_TBCST(_p, WM_B) +#define ARM_WM_TBCSTH(_p, _wrd, _rn) _WM_TBCST(_p, WM_H) +#define ARM_WM_TBCSTD(_p, _wrd, _rn) _WM_TBCST(_p, WM_D) + + +#endif /* __WMMX_H__ */ -- cgit v1.1 From 7595b109642f29ffe0cf8bb3e4411243b92a606f Mon Sep 17 00:00:00 2001 From: Malte Hildingson Date: Sun, 27 Apr 2003 16:04:54 +0000 Subject: * tramp.c (alloc_code_buff): posix memory protection. (mono_create_trampoline): new string marshaling + minor fixes. (mono_create_method_pointer): delegates fix. svn path=/trunk/mono/; revision=14046 --- arm/tramp.c | 227 ++++++++++++++++++++---------------------------------------- 1 file changed, 75 insertions(+), 152 deletions(-) diff --git a/arm/tramp.c b/arm/tramp.c index 3e5af33..59d9fb4 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -1,6 +1,8 @@ /* * Create trampolines to invoke arbitrary functions. 
* Copyright (c) 2002 Sergey Chaban + * + * Contributions by Malte Hildingson */ #include "arm-codegen.h" @@ -10,17 +12,18 @@ # include #endif +#include + #include "mono/metadata/class.h" #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" -#if 1 +#if 0 # define ARM_DUMP_DISASM 1 #endif - /* prototypes for private functions (to avoid compiler warnings) */ void flush_icache (void); void* alloc_code_buff (int num_instr); @@ -64,15 +67,27 @@ void flush_icache () void* alloc_code_buff (int num_instr) { void* code_buff; + int code_size = num_instr * sizeof(arminstr_t); #if defined(_WIN32) || defined(UNDER_CE) int old_prot = 0; -#endif - - code_buff = malloc(num_instr * sizeof(arminstr_t)); -#if defined(_WIN32) || defined(UNDER_CE) - VirtualProtect(code_buff, num_instr * sizeof(arminstr_t), PAGE_EXECUTE_READWRITE, &old_prot); + code_buff = malloc(code_size); + VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot); +#else +#include +#include + int page_size = sysconf(_SC_PAGESIZE); + int new_code_size; + + new_code_size = code_size + page_size - 1; + code_buff = malloc(new_code_size); + code_buff = (void *) (((int) code_buff + page_size - 1) & ~(page_size - 1)); + + if (mprotect(code_buff, code_size, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) { + g_critical (G_GNUC_PRETTY_FUNCTION + ": mprotect error: %s", g_strerror (errno)); + } #endif return code_buff; @@ -82,18 +97,15 @@ void* alloc_code_buff (int num_instr) /* * Refer to ARM Procedure Call Standard (APCS) for more info. */ -MonoPIFunc mono_create_trampoline (MonoMethod* method, int runtime) +MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { - MonoMethodSignature* sig; MonoType* param; MonoPIFunc code_buff; - arminstr_t* p, * utf8_addr, * free_addr, * str_new_addr; + arminstr_t* p; guint32 code_size, stack_size; guint32 simple_type; int i, hasthis, aregs, regc, stack_offs; - int utf8_offs, utf8_reg, utf8_stack_offs; int this_loaded; - int str_args, strc; guchar reg_alloc [ARM_NUM_ARG_REGS]; /* pessimistic estimation for prologue/epilogue size */ @@ -106,8 +118,6 @@ MonoPIFunc mono_create_trampoline (MonoMethod* method, int runtime) code_size += 2; stack_size = 0; - str_args = 0; - sig = method->signature; hasthis = sig->hasthis ? 
1 : 0; aregs = ARM_NUM_ARG_REGS - hasthis; @@ -157,13 +167,6 @@ enum_calc_size: code_size += 2; stack_size += 4; } - - if (simple_type == MONO_TYPE_STRING - && !(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) - && !runtime) { - code_size += 6; /* rough */ - ++str_args; - } break; case MONO_TYPE_I8: case MONO_TYPE_U8: @@ -193,6 +196,7 @@ enum_calc_size: simple_type = param->data.klass->enum_basetype->type; goto enum_calc_size; } + if (mono_class_value_size(param->data.klass, NULL) != 4) { g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL)); } @@ -213,14 +217,11 @@ enum_calc_size: } } - if (str_args) code_size += 2; - code_buff = (MonoPIFunc)alloc_code_buff(code_size); p = (arminstr_t*)code_buff; /* prologue */ - p = arm_emit_lean_prologue((arminstr_t*)p, - stack_size + str_args*sizeof(gpointer), + p = arm_emit_lean_prologue(p, stack_size, /* save workset (r4-r7) */ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7)); @@ -229,7 +230,7 @@ enum_calc_size: /* callme - always present */ ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1); /* retval */ - if (sig->ret->byref || (sig->ret->type != MONO_TYPE_VOID)) { + if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) { ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2); } /* this_obj */ @@ -247,44 +248,20 @@ enum_calc_size: ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4); } - if (str_args || sig->ret->type == MONO_TYPE_STRING) { - /* branch around address table */ - ARM_B(p, str_args ? 2 : 0); - - /* create branch table for string functions */ - if (str_args) { - /* allocate slots for convert - * and free functions only if - * we have some string args, - * otherwise only string_new - * is needed for retval. - */ - utf8_addr = p; - *p++ = (arminstr_t)&mono_string_to_utf8; - free_addr = p; - *p++ = (arminstr_t)&g_free; - } - str_new_addr = p; - *p++ = (arminstr_t)&mono_string_new_wrapper; - - strc = str_args; /* # of string args */ - } - stack_offs = stack_size; - utf8_stack_offs = stack_size + str_args*sizeof(gpointer); /* handle arguments */ /* in reverse order so we could use r0 (arg1) for memory transfers */ for (i = sig->param_count; --i >= 0;) { param = sig->params [i]; if (param->byref) { - if (i < aregs) { - ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE); - } else { - stack_offs -= sizeof(armword_t); - ARM_LDR_IMM(p, ARMREG_R4, REG_ARGP, i*ARG_SIZE); - ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, stack_offs); - } + if (i < aregs) { + ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE); + } else { + stack_offs -= sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + } } else { simple_type = param->type; enum_marshal: @@ -304,7 +281,7 @@ enum_marshal: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: -push_a_word: + case MONO_TYPE_STRING: if (i < aregs && reg_alloc [i] > 0) { /* pass in register */ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE); @@ -337,66 +314,23 @@ push_a_word: ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4); } break; - case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - goto push_a_word; - } else { - if (sig->hasthis && this_loaded) { - ARM_MOV_REG_REG(p, REG_THIS, ARMREG_A1); - this_loaded = 0; - } - - if (sig->hasthis && strc == str_args) { - ARM_PUSH(p, (1 << REG_THIS)); - /* adjust stack pointers */ - stack_offs += sizeof(armword_t); - utf8_stack_offs += 
sizeof(armword_t); - } - - utf8_offs = -(p + 2 - utf8_addr) * sizeof(arminstr_t); - utf8_reg = sig->hasthis ? REG_FUNC_ADDR : REG_THIS; - /* load function address */ - ARM_LDR_IMM(p, utf8_reg, ARMREG_PC, utf8_offs); - /* load MonoString ptr */ - ARM_LDR_IMM(p, ARMREG_A1, REG_ARGP, i*ARG_SIZE); - /* call string_to_utf8 function */ - ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); - ARM_MOV_REG_REG(p, ARMREG_PC, utf8_reg); - - /* count-down string args */ - --strc; - - if (sig->hasthis && strc == 0) { - ARM_POP(p, (1 << REG_THIS)); - /* restore stack pointers */ - stack_offs -= sizeof(armword_t); - utf8_stack_offs -= sizeof(armword_t); - } - - /* maintain list of allocated strings */ - utf8_stack_offs -= sizeof(gpointer); - ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, utf8_stack_offs); - - if (i < aregs && reg_alloc [i] > 0) { - /* pass in register */ - utf8_reg = ARMREG_A1 + hasthis + (aregs - reg_alloc [i]); - /* result returned in R0, avoid NOPs */ - if (utf8_reg != ARMREG_R0) { - ARM_MOV_REG_REG(p, utf8_reg, ARMREG_R0); - } - } else { - stack_offs -= sizeof(armword_t); - ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); - } - } - break; case MONO_TYPE_VALUETYPE: if (param->data.klass->enumtype) { /* it's an enum value, proceed based on its base type */ simple_type = param->data.klass->enum_basetype->type; goto enum_marshal; } else { - goto push_a_word; + if (i < aregs && reg_alloc[i] > 0) { + int vtreg = ARMREG_A1 + hasthis + + hasthis + (aregs - reg_alloc[i]); + ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE); + ARM_LDR_IMM(p, vtreg, vtreg, 0); + } else { + stack_offs -= sizeof(armword_t); + ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE); + ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0); + ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs); + } } break; @@ -415,9 +349,8 @@ push_a_word: ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR); - /* handle retval */ - if (sig->ret->byref) { + if (sig->ret->byref || string_ctor) { ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); } else { simple_type = sig->ret->type; @@ -446,24 +379,9 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: - ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); - break; + case MONO_TYPE_SZARRAY: case MONO_TYPE_STRING: - if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || runtime) { - /* return UTF8 string as-is */ - ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); - } else { - /* if result is non-null convert it back to MonoString */ - utf8_offs = -(p + 2 - str_new_addr) * sizeof(arminstr_t); - ARM_TEQ_REG_IMM8(p, ARMREG_R0, 0); - /* load mono_string_new_wrapper address */ - ARM_LDR_IMM_COND(p, ARMREG_R2, ARMREG_PC, utf8_offs, ARMCOND_NE); - /* call mono_string_new_wrapper */ - ARM_MOV_REG_REG_COND(p, ARMREG_LR, ARMREG_PC, ARMCOND_NE); - ARM_MOV_REG_REG_COND(p, ARMREG_PC, ARMREG_R2, ARMCOND_NE); - - ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); - } + ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0); break; /* * A 64-bit integer is returned in R0 and R1. 
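/* [editor's sketch] The I8/U8 case referenced by the comment above,
 * spelled out: under the APCS a 64-bit result comes back in R0/R1,
 * so the trampoline must store both words through REG_RETVAL. A
 * hedged reconstruction, not a line of this commit: */
ARM_STR_IMM (p, ARMREG_R0, REG_RETVAL, 0);   /* low word */
ARM_STR_IMM (p, ARMREG_R1, REG_RETVAL, 4);   /* high word */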
@@ -487,26 +405,8 @@ enum_retvalue: break; } } - - /* free allocated strings */ - if (str_args) { - utf8_stack_offs = stack_size + str_args*sizeof(gpointer); - for (strc = str_args; --strc >= 0;) { - utf8_stack_offs -= sizeof(gpointer); - /* calc PC-relative offset to function addr */ - utf8_offs = -(p + 2 - free_addr) * sizeof(arminstr_t); - /* load function address */ - ARM_LDR_IMM(p, ARMREG_R2, ARMREG_PC, utf8_offs); - /* load MonoString ptr */ - ARM_LDR_IMM(p, ARMREG_A1, ARMREG_SP, utf8_stack_offs); - /* call free function */ - ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC); - ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R2); - } - } - - - p = arm_emit_std_epilogue(p, stack_size + str_args*sizeof(gpointer), + + p = arm_emit_std_epilogue(p, stack_size, /* restore R4-R7 */ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7)); @@ -524,6 +424,7 @@ enum_retvalue: #define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member) + /* * Returns a pointer to a native function that can be used to * call the specified method. @@ -541,6 +442,21 @@ void* mono_create_method_pointer (MonoMethod* method) void* code_buff; int i, stack_size, arg_pos, arg_add, stackval_pos, offs; int areg, reg_args, shift, pos; + MonoJitInfo *ji; + + /* + * If it is a static P/Invoke method just + * just return the pointer to the implementation + */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0(MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add(mono_root_domain, ji); + return method->addr; + } code_buff = alloc_code_buff(128); p = (guchar*)code_buff; @@ -567,7 +483,6 @@ void* mono_create_method_pointer (MonoMethod* method) p_exec = p; p += 4; - stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t); /* prologue */ @@ -751,6 +666,7 @@ void* mono_create_method_pointer (MonoMethod* method) case MONO_TYPE_OBJECT: case MONO_TYPE_CLASS: case MONO_TYPE_ARRAY: + case MONO_TYPE_SZARRAY: ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0); break; case MONO_TYPE_I8: @@ -778,6 +694,13 @@ void* mono_create_method_pointer (MonoMethod* method) _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff)); #endif + ji = g_new0(MonoJitInfo, 1); + ji->method = method; + ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff); + ji->code_start = (gpointer) code_buff; + + mono_jit_info_table_add(mono_root_domain, ji); + return code_buff; } -- cgit v1.1 From 3a48ea89b161b268bb74f013cc36f6aec59e550b Mon Sep 17 00:00:00 2001 From: Malte Hildingson Date: Thu, 1 May 2003 23:42:01 +0000 Subject: * tramp.c (mono_create_trampoline): tiny register allocation fix for reference types svn path=/trunk/mono/; revision=14195 --- arm/tramp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arm/tramp.c b/arm/tramp.c index 59d9fb4..12793c8 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -255,7 +255,7 @@ enum_calc_size: for (i = sig->param_count; --i >= 0;) { param = sig->params [i]; if (param->byref) { - if (i < aregs) { + if (i < aregs && reg_alloc[i] > 0) { ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE); } else { stack_offs -= sizeof(armword_t); -- cgit v1.1 From c4eeb3dfdd19546fb0712e5306d8d96a9a07580e Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Tue, 20 May 2003 10:44:31 +0000 Subject: 2003-05-20 Dietmar Maurer * mini-x86.c (mono_arch_get_allocatable_int_vars): allocate 8/16 bit values to registers svn path=/trunk/mono/; revision=14720 --- ChangeLog | 4 
++++ x86/x86-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index db9d34a..cb5b4a8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2003-05-20 Dietmar Maurer + + * x86/x86-codegen.h (x86_set_reg): add an assertion - it does + not work for all registers. Sat Feb 1 10:59:31 CET 2003 Paolo Molaro diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index d816e27..dd0c1e7 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1430,6 +1430,7 @@ typedef union { #define x86_set_reg(inst,cond,reg,is_signed) \ do { \ + g_assert (reg < 4); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ -- cgit v1.1 From 3af153bd53728da9da9215141b1341d60b447bd3 Mon Sep 17 00:00:00 2001 From: Dietmar Maurer Date: Wed, 21 May 2003 12:45:22 +0000 Subject: 2003-05-21 Dietmar Maurer * mini-x86.c (mono_arch_get_allocatable_int_vars): dont allocate I1 to registers because there is no simply way to sign extend 8bit quantities in caller saved registers on x86. * inssel-float.brg: set costs of some rules to 2 so that monobure always select the arch. specific ones if supplied, regardless of the order we pass the files to monoburg. svn path=/trunk/mono/; revision=14757 --- x86/x86-codegen.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index dd0c1e7..5f9c8ea 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -162,6 +162,8 @@ typedef union { #define X86_IS_SCRATCH(reg) (X86_CALLER_REGS & (1 << (reg))) /* X86_EAX, X86_ECX, or X86_EDX */ #define X86_IS_CALLEE(reg) (X86_CALLEE_REGS & (1 << (reg))) /* X86_ESI, X86_EDI, X86_EBX, or X86_EBP */ +#define X86_IS_BYTE_REG(reg) ((reg) < 4) + /* // Frame structure: // @@ -952,6 +954,7 @@ typedef union { #define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \ do { \ unsigned char op = 0xb6; \ + g_assert (is_half || X86_IS_BYTE_REG (reg)); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ @@ -1430,7 +1433,7 @@ typedef union { #define x86_set_reg(inst,cond,reg,is_signed) \ do { \ - g_assert (reg < 4); \ + g_assert (X86_IS_BYTE_REG (reg)); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ -- cgit v1.1 From df86960d595f0284a453fe3fc67687b707148dbf Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 21 May 2003 17:57:05 +0000 Subject: Some fixes and more complete support. 
svn path=/trunk/mono/; revision=14769 --- ppc/ppc-codegen.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index d3473ad..177e014 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -12,6 +12,7 @@ typedef enum { ppc_r0 = 0, ppc_r1, + ppc_sp = ppc_r1, ppc_r2, ppc_r3, ppc_r4, @@ -81,10 +82,49 @@ typedef enum { typedef enum { ppc_lr = 256, + ppc_ctr = 256 + 32, + ppc_xer = 32 } PPCSpecialRegister; -#define ppc_emit32(c,x) *((guint32 *) c) = x; ((guint32 *)c)++ - +enum { + /* B0 operand for branches */ + PPC_BR_LIKELY = 1, /* can be or'ed with the conditional variants */ + PPC_BR_FALSE = 4, + PPC_BR_TRUE = 12, + PPC_BR_ALWAYS = 20, + /* B1 operand for branches */ + PPC_BR_LT = 0, + PPC_BR_GT = 1, + PPC_BR_EQ = 2, + PPC_BR_SO = 3 +}; + +enum { + PPC_TRAP_LT = 1, + PPC_TRAP_GT = 2, + PPC_TRAP_EQ = 4, + PPC_TRAP_LT_UN = 8, + PPC_TRAP_GT_UN = 16, + PPC_TRAP_LE = 1 + PPC_TRAP_EQ, + PPC_TRAP_GE = 2 + PPC_TRAP_EQ, + PPC_TRAP_LE_UN = 8 + PPC_TRAP_EQ, + PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ +}; + +#define ppc_emit32(c,x) do { *((guint32 *) c) = x; ((guint32 *)c)++;} while (0) + +#define ppc_is_imm16(val) ((gint)val >= -(1<<16) && (gint)val <= ((1<<16)-1)) + +#define ppc_load(c,D,v) do { \ + if (ppc_is_imm16 ((v))) { \ + ppc_li ((c), (D), (v)); \ + } else { \ + ppc_lis ((c), (D), (guint32)(v) >> 16); \ + ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ + } \ + } while (0) + +#define ppc_break(c) ppc_tw((c),31,0,0) #define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) #define ppc_addis(c,D,A,d) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) #define ppc_li(c,D,v) ppc_addi (c, D, 0, v); @@ -103,8 +143,13 @@ typedef enum { #define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr) #define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) #define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S) +#define ppc_mtctr(c,S) ppc_mtspr (c, ppc_ctr, S) +#define ppc_mtxer(c,S) ppc_mtspr (c, ppc_xer, S) #define ppc_b(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2)) +#define ppc_bl(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 1) +#define ppc_ba(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 2) +#define ppc_bla(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 3) #define ppc_blrl(c) ppc_emit32 (c, 0x4e800021) #define ppc_blr(c) ppc_emit32 (c, 0x4e800020) @@ -176,8 +221,8 @@ my and Ximian's copyright to this code. ;) #define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0) #define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1) -#define ppc_andid(c,S,A,d) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | (0x0000 || (guint16)(d))) -#define ppc_andisd(c,S,A,d) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(d) || 0x0000)) +#define ppc_andid(c,S,A,d) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(d))) +#define ppc_andisd(c,S,A,d) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(d))) #define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK) #define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0) @@ -471,6 +516,8 @@ my and Ximian's copyright to this code. 
;) #define ppc_nor(c,A,S,B) ppc_norx(c,A,S,B,0) #define ppc_nord(c,A,S,B) ppc_norx(c,A,S,B,1) +#define ppc_not(c,A,S) ppc_norx(c,A,S,S,0) + #define ppc_orx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (444 << 1) | Rc) #define ppc_ord(c,A,S,B) ppc_orx(c,A,S,B,1) @@ -543,6 +590,8 @@ my and Ximian's copyright to this code. ;) #define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0) #define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1) +#define ppc_sub(c,D,A,B) ppc_subf(c,D,B,A) + #define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc) #define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0) #define ppc_subfcd(c,D,A,B) ppc_subfcx(c,D,A,B,0,1) -- cgit v1.1 From 2ad34b0dc225bf0b2efeea63c2f9287a1dbad162 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 9 Jun 2003 18:28:54 +0000 Subject: Small updates. svn path=/trunk/mono/; revision=15250 --- x86/x86-codegen.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 5f9c8ea..6c49c21 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -90,6 +90,16 @@ typedef enum { X86_CC_NO = 11, X86_NCC } X86_CC; + +/* FP status */ +enum { + X86_FP_C0 = 0x100, + X86_FP_C1 = 0x200, + X86_FP_C2 = 0x400, + X86_FP_C3 = 0x4000, + X86_FP_CC_MASK = 0x4500 +}; + /* // prefix code */ @@ -104,6 +114,8 @@ typedef enum { X86_ES_PREFIX = 0x26, X86_FS_PREFIX = 0x64, X86_GS_PREFIX = 0x65, + X86_UNLIKELY_PREFIX = 0x2E, + X86_LIKELY_PREFIX = 0x3E, X86_OPERAND_PREFIX = 0x66, X86_ADDRESS_PREFIX = 0x67 } X86_Prefix; @@ -1187,6 +1199,12 @@ typedef union { *(inst)++ = (unsigned char)0xe8; \ } while (0) +#define x86_fldpi(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xeb; \ + } while (0) + #define x86_fst(inst,mem,is_double,pop_stack) \ do { \ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ -- cgit v1.1 From c439e3df5cfa7c67d976258228cb9188a218c21d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 25 Jun 2003 13:18:00 +0000 Subject: FP control word enum. svn path=/trunk/mono/; revision=15623 --- x86/x86-codegen.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6c49c21..e0b12ff 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -100,6 +100,29 @@ enum { X86_FP_CC_MASK = 0x4500 }; +/* FP control word */ +enum { + X86_FPCW_INVOPEX_MASK = 0x1, + X86_FPCW_DENOPEX_MASK = 0x2, + X86_FPCW_ZERODIV_MASK = 0x4, + X86_FPCW_OVFEX_MASK = 0x8, + X86_FPCW_UNDFEX_MASK = 0x10, + X86_FPCW_PRECEX_MASK = 0x20, + X86_FPCW_PRECC_MASK = 0x300, + X86_FPCW_ROUNDC_MASK = 0xc00, + + /* values for precision control */ + X86_FPCW_PREC_SINGLE = 0, + X86_FPCW_PREC_DOUBLE = 0x200, + X86_FPCW_PREC_EXTENDED = 0x300, + + /* values for rounding control */ + X86_FPCW_ROUND_NEAREST = 0, + X86_FPCW_ROUND_DOWN = 0x400, + X86_FPCW_ROUND_UP = 0x800, + X86_FPCW_ROUND_TOZERO = 0xc00 +}; + /* // prefix code */ -- cgit v1.1 From 6e851a87092161092c6e8f06f4de13fb45bc04a6 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 1 Jul 2003 11:12:47 +0000 Subject: Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us).
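The ppc_load macro introduced a few patches above picks between a single li and a lis/ori pair, depending on whether the constant fits a signed 16-bit immediate. Hand-expanded, with code and val as illustrative locals (a sketch mirroring the macro, not code from the patch):

/* materialize a 32-bit constant into r3, mirroring ppc_load */
if (ppc_is_imm16 (val)) {
	ppc_li (code, ppc_r3, val);                            /* addi r3, 0, val */
} else {
	ppc_lis (code, ppc_r3, (guint32)val >> 16);            /* set the high 16 bits */
	ppc_ori (code, ppc_r3, ppc_r3, (guint32)val & 0xffff); /* or in the low 16 bits */
}

Note that ppc_is_imm16 as committed here accepts one bit too many; the "Fixed imm16 range check" patch further down corrects the bounds.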
svn path=/trunk/mono/; revision=15809 --- ChangeLog | 5 + alpha/tramp.c | 435 +++++++++++++++++++++++++++++++++++----------------------- 2 files changed, 272 insertions(+), 168 deletions(-) diff --git a/ChangeLog b/ChangeLog index cb5b4a8..3128387 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro + + * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us). + 2003-05-20 Dietmar Maurer * x86/x86-codegen.h (x86_set_reg): add an assertion - it does diff --git a/alpha/tramp.c b/alpha/tramp.c index 67e8613..c02a83a 100644 --- a/alpha/tramp.c +++ b/alpha/tramp.c @@ -36,30 +36,56 @@ fun..ng: // called from inside the module. ret zero, (ra), 1 // return. -// assuming that the procedure is in a0. -#define emit_prologue( p ) \ - alpha_ldah( p, alpha_gp, alpha_pv, 0 ); \ - alpha_lda( p, alpha_sp, alpha_sp, -32 ); \ - alpha_lda( p, alpha_gp, alpha_gp, 0 ); \ - alpha_stq( p, alpha_ra, alpha_sp, 0 ); \ - alpha_stq( p, alpha_fp, alpha_sp, 8 ); \ - alpha_mov( p, alpha_sp, alpha_fp ) - -#define emit_move_a0_to_pv( p ) \ - alpha_mov( p, alpha_a0, alpha_pv ) - -#define emit_call( p ) \ - alpha_jsr( p, alpha_ra, alpha_pv, 0 ); \ - alpha_ldah( p, alpha_gp, alpha_ra, 0 ); \ - alpha_lda( p, alpha_gp, alpha_gp, 0 ); \ - -#define emit_epilogue( p ) \ - alpha_mov( p, alpha_fp, alpha_sp ); \ - alpha_ldq( p, alpha_ra, alpha_sp, 0 ); \ - alpha_ldq( p, alpha_fp, alpha_sp, 8 ); \ - alpha_lda( p, alpha_sp, alpha_sp, 32 ); \ - alpha_ret( p, alpha_ra ) - +// min SIZE = 48 +// our call must look like this. + +call_func: + ldgp gp, 0(pv) +call_func..ng: + .prologue + lda sp, -SIZE(sp) // grow stack SIZE bytes. + stq ra, SIZE-48(sp) // store ra + stq fp, SIZE-40(sp) // store fp (frame pointer) + stq a0, SIZE-32(sp) // store args. a0 = func + stq a1, SIZE-24(sp) // a1 = retval + stq a2, SIZE-16(sp) // a2 = this + stq a3, SIZE-8(sp) // a3 = args + mov sp, fp // set frame pointer + mov pv, a0 // func + + .calling_arg_this + mov a1, a2 + + .calling_arg_6plus + ldq t0, POS(a3) + stq t0, 0(sp) + ldq t1, POS(a3) + stq t1, 8(sp) + ... SIZE-56 ... + + mov zero,a1 + mov zero,a2 + mov zero,a3 + mov zero,a4 + mov zero,a5 + + .do_call + jsr ra, (pv) // call func + ldgp gp, 0(ra) // restore gp. + mov v0, t1 // move return value into t1 + + .do_store_retval + ldq t0, SIZE-24(fp) // load retval into t2 + stl t1, 0(t0) // store value. + + .finished + mov fp,sp + ldq ra,SIZE-48(sp) + ldq fp,SIZE-40(sp) + lda sp,SIZE(sp) + ret zero,(ra),1 + + */ /*****************************************************/ @@ -76,179 +102,253 @@ fun..ng: // called from inside the module. #include "mono/metadata/debug-helpers.h" #define AXP_GENERAL_REGS 6 -#define AXP_MIN_STACK_SIZE 32 - -#define PROLOG_INS 6 -#define CALL_INS 3 -#define EPILOG_INS 5 +#define AXP_MIN_STACK_SIZE 24 +#define ARG_SIZE sizeof(stackval) +#define ARG_LOC(x) (x * sizeof( stackval ) ) /*****************************************************/ -typedef struct { - guint i_regs; - guint f_regs; - guint stack_size; - guint code_size; -} size_data; - - -static char* -sig_to_name (MonoMethodSignature *sig, const char *prefix) -{ - /* from sparc.c. 
this should be global */ - - int i; - char *result; - GString *res = g_string_new (""); - - if (prefix) { - g_string_append (res, prefix); - g_string_append_c (res, '_'); - } - - mono_type_get_desc (res, sig->ret, TRUE); - - for (i = 0; i < sig->param_count; ++i) { - g_string_append_c (res, '_'); - mono_type_get_desc (res, sig->params [i], TRUE); - } - result = res->str; - g_string_free (res, FALSE); - return result; -} - - -static void inline -add_general ( size_data *sz, gboolean simple) +/* */ +/* void func (void (*callme)(), void *retval, */ +/* void *this_obj, stackval *arguments); */ +static inline guint8 * +emit_prolog (guint8 *p, const gint SIZE, int hasthis ) { - // we don't really know yet, so just put something in here. - if ( sz->i_regs >= AXP_GENERAL_REGS) - { - sz->stack_size += 8; - } - - // ...and it probably doesn't matter if our code size is a - // little large... - - sz->code_size += 12; - sz->i_regs ++; + // 9 instructions. + alpha_ldah( p, alpha_gp, alpha_pv, 0 ); + alpha_lda( p, alpha_sp, alpha_sp, -SIZE ); // grow stack down SIZE + alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(pv) + + /* TODO: we really don't need to store everything. + alpha_a1: We have to store this in order to return the retval. + + alpha_a0: func pointer can be moved directly to alpha_pv + alpha_a3: don't need args after we are finished. + alpha_a2: will be moved into alpha_a0... if hasthis is true. + */ + /* store parameters on stack.*/ + alpha_stq( p, alpha_ra, alpha_sp, SIZE-24 ); // ra + alpha_stq( p, alpha_fp, alpha_sp, SIZE-16 ); // fp + alpha_stq( p, alpha_a1, alpha_sp, SIZE-8 ); // retval + + /* set the frame pointer */ + alpha_mov1( p, alpha_sp, alpha_fp ); + + /* move the args into t0, pv */ + alpha_mov1( p, alpha_a0, alpha_pv ); + alpha_mov1( p, alpha_a3, alpha_t0 ); + + // Move the this pointer into a0. 
+ if( hasthis ) + alpha_mov1( p, alpha_a2, alpha_a0 ); + return p; } -static void -calculate_sizes (MonoMethodSignature *sig, - size_data *sz, - gboolean string_ctor) +static inline guint8 * +emit_call( guint8 *p , const gint SIZE ) { - guint i, size; - guint32 simpletype, align; - - sz->i_regs = 0; - sz->f_regs = 0; - sz->stack_size = AXP_MIN_STACK_SIZE; - sz->code_size = 4 * (PROLOG_INS + CALL_INS + EPILOG_INS); - - if (sig->hasthis) { - add_general (&gr, sz, TRUE); - } + // 3 instructions + /* call func */ + alpha_jsr( p, alpha_ra, alpha_pv, 0 ); // jsr ra, 0(pv) - for (i = 0; i < sig->param_count; ++i) { - switch (sig->ret->type) { - case MONO_TYPE_VOID: - break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_R4: - case MONO_TYPE_R8: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - add_general (&gr, sz, TRUE); - break; - case MONO_TYPE_VALUETYPE: - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); - } - } - - /* align stack size to 8 */ - sz->stack_size = (sz->stack_size + 8) & ~8; - sz->local_size = (sz->local_size + 8) & ~8; + /* reload the gp */ + alpha_ldah( p, alpha_gp, alpha_ra, 0 ); + alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(ra) + + return p; } -/* */ -/* void func (void (*callme)(), void *retval, */ -/* void *this_obj, stackval *arguments); */ static inline guint8 * -emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz) +emit_store_return_default(guint8 *p, const gint SIZE ) { - guint stack_size; - - stack_size = sz->stack_size; - - /* function prolog */ - alpha_ldah( p, alpha_gp, alpha_pv, 0 ); - alpha_lda( p, alpha_sp, alpha_sp, -stack_size ); - alpha_lda( p, alpha_gp, alpha_gp, 0 ); - - /* save ra, fp */ - alpha_stq( p, alpha_ra, alpha_sp, 0 ); - alpha_stq( p, alpha_fp, alpha_sp, 8 ); - - /* store the return parameter */ - alpha_stq( p, alpha_a0, alpha_sp, 16 ); - alpha_stq( p, alpha_a1, alpha_sp, 24 ); - - /* load fp into sp */ - alpha_mov( p, alpha_sp, alpha_fp ) - + // 2 instructions. + + /* TODO: This should probably do different stores based on the value, + you know, like stq/l/w and s/f. + */ + alpha_ldq( p, alpha_t0, alpha_fp, SIZE-8 ); // load void * retval + alpha_stq( p, alpha_v0, alpha_t0, 0 ); // store the result to *retval. return p; } + static inline guint8 * -emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) -{ - alpha_mov( p, alpha_fp, alpha_sp ); +emit_epilog (guint8 *p, const gint SIZE ) +{ + // 5 instructions.
+ alpha_mov1( p, alpha_fp, alpha_sp ); /* restore fp, ra, sp */ - alpha_ldq( p, alpha_ra, alpha_sp, 0 ); - alpha_ldq( p, alpha_fp, alpha_sp, 8 ); - alpha_lda( p, alpha_sp, alpha_sp, 32 ); + alpha_ldq( p, alpha_ra, alpha_sp, SIZE-24 ); + alpha_ldq( p, alpha_fp, alpha_sp, SIZE-16 ); + alpha_lda( p, alpha_sp, alpha_sp, SIZE ); /* return */ - alpha_ret( p, alpha_ra ); + alpha_ret( p, alpha_ra, 1 ); + return p; } -static inline guint8 * -emit_call( guint8 *p, MonoMethodSignature *sig, size_data *sz ) +static void calculate_size(MonoMethodSignature *sig, int * INSTRUCTIONS, int * STACK ) { - /* move a0 into pv, ready to call */ - alpha_mov( p, alpha_a0, alpha_pv ); + int alpharegs; - /* call arg */ - alpha_jsr( p, alpha_ra, alpha_pv, 0 ); + alpharegs = AXP_GENERAL_REGS - (sig->hasthis?1:0); - /* reload the gp */ - alpha_ldah( p, alpha_gp, alpha_ra, 0 ); - alpha_lda( p, alpha_gp, alpha_gp, 0 ); + *STACK = AXP_MIN_STACK_SIZE; + *INSTRUCTIONS = 20; // Base: 20 instructions. + + if( sig->param_count - alpharegs > 0 ) + { + *STACK += ARG_SIZE * (sig->param_count - alpharegs ); + // plus 3 (potential) for each stack parameter. + *INSTRUCTIONS += ( sig->param_count - alpharegs ) * 3; + // plus 2 (potential) for each register parameter. + *INSTRUCTIONS += ( alpharegs * 2 ); + } + else + { + // plus 2 (potential) for each register parameter. + *INSTRUCTIONS += ( sig->param_count * 2 ); + } } - MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { - g_error ("Unsupported arch"); - return NULL; + unsigned char *p; + unsigned char *buffer; + MonoType* param; + + int i, pos; + int alpharegs; + int hasthis; + int STACK_SIZE; + int BUFFER_SIZE; + int simple_type; + int regbase; + + // Set up basic stuff. like has this. + hasthis = !!sig->hasthis; + alpharegs = AXP_GENERAL_REGS - hasthis; + regbase = hasthis?alpha_a1:alpha_a0 ; + + // Make a ballpark estimate for now. + calculate_size( sig, &BUFFER_SIZE, &STACK_SIZE ); + + // convert to the correct number of bytes. + BUFFER_SIZE = BUFFER_SIZE * 4; + + + // allocate. + buffer = p = malloc(BUFFER_SIZE); + memset( buffer, 0, BUFFER_SIZE ); + pos = 0; + + // Ok, start creating this thing. + p = emit_prolog( p, STACK_SIZE, hasthis ); + + // copy everything into the correct register/stack space + for (i = sig->param_count; --i >= 0; ) + { + param = sig->params [i]; + + if( param->byref ) + { + if( i > alpharegs ) + { + // load into temp register, then store on the stack + alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i )); + alpha_stl( p, alpha_t1, alpha_sp, pos ); + pos += 8; + + if( pos > 128 ) + g_error( "Too large." 
); + } + else + { + // load into register + alpha_ldq( p, regbase + i, alpha_t0, ARG_LOC( i ) ); + } + } + else + { + simple_type = param->type; + if( simple_type == MONO_TYPE_VALUETYPE ) + { + if (sig->ret->data.klass->enumtype) + simple_type = sig->ret->data.klass->enum_basetype->type; + } + + switch (simple_type) + { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + // 8 bytes + if( i > alpharegs ) + { + // load into temp register, then store on the stack + alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); + alpha_stq( p, alpha_t1, alpha_sp, pos ); + pos += 8; + } + else + { + // load into register + alpha_ldq( p, regbase + i, alpha_t0, ARG_LOC(i) ); + } + break; + case MONO_TYPE_R4: + case MONO_TYPE_R8: + /* + // floating point... Maybe this does the correct thing. + if( i > alpharegs ) + { + alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); + alpha_cpys( p, alpha_ft1, alpha_ft1, alpha_ft2 ); + alpha_stt( p, alpha_ft2, alpha_sp, pos ); + pos += 8; + } + else + { + alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC(i) ); + alpha_cpys( p, alpha_ft1, alpha_ft1, alpha_fa0 + i + hasthis ); + } + break; + */ + case MONO_TYPE_VALUETYPE: + g_error ("Not implemented: ValueType as parameter to delegate." ); + break; + default: + g_error( "Not implemented." ); + break; + } + } + } + + // Now call the function and store the return parameter. + p = emit_call( p, STACK_SIZE ); + p = emit_store_return_default( p, STACK_SIZE ); + p = emit_epilog( p, STACK_SIZE ); + + if( p > buffer + BUFFER_SIZE ) + g_error( "Buffer overflow." ); + + return (MonoPIFunc)buffer; } void * @@ -257,4 +357,3 @@ mono_create_method_pointer (MonoMethod *method) g_error ("Unsupported arch"); return NULL; } - -- cgit v1.1 From ebc38557433accd79fce2e38dff0505dfded5691 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 31 Jul 2003 14:32:42 +0000 Subject: Thu Jul 31 16:19:07 CEST 2003 Paolo Molaro * configure.in, etc.: portability fixes and support for buidling outside the srcdir from Laurent Morichetti . svn path=/trunk/mono/; revision=16937 --- arm/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arm/Makefile.am b/arm/Makefile.am index afce5cd..b245bcd 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -14,7 +14,7 @@ libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ arm-dis.h arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th - bash ./dpiops.sh + bash $(srcdir)/dpiops.sh CLEANFILES = $(BUILT_SOURCES) -- cgit v1.1 From c750ad8fea95e1fc81150e516ee26fbe79ab570d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 7 Aug 2003 14:13:05 +0000 Subject: Fixed imm16 range check. 
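The one-line fix below is worth spelling out: a signed 16-bit immediate covers -32768..32767, i.e. -(1<<15) to (1<<15)-1, while the old bounds of -(1<<16) to (1<<16)-1 also accepted 17-bit values that the (guint16) cast in ppc_addi then silently truncated. With the corrected macro the following holds (illustrative values only):

g_assert (ppc_is_imm16 (32767));   /* largest immediate addi can encode */
g_assert (ppc_is_imm16 (-32768));  /* smallest */
g_assert (!ppc_is_imm16 (40000));  /* passed the old check, got truncated */
g_assert (!ppc_is_imm16 (-40000)); /* ppc_load now falls back to lis/ori */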
svn path=/trunk/mono/; revision=17157 --- ppc/ppc-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 177e014..2f62aa4 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -113,7 +113,7 @@ enum { #define ppc_emit32(c,x) do { *((guint32 *) c) = x; ((guint32 *)c)++;} while (0) -#define ppc_is_imm16(val) ((gint)val >= -(1<<16) && (gint)val <= ((1<<16)-1)) +#define ppc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) #define ppc_load(c,D,v) do { \ if (ppc_is_imm16 ((v))) { \ -- cgit v1.1 From 6260d65a087be486df039c80eba92e44eb7a220d Mon Sep 17 00:00:00 2001 From: ct Date: Tue, 19 Aug 2003 02:53:23 +0000 Subject: added floating point instructions for adding double, single, and quad numbers svn path=/trunk/mono/; revision=17393 --- sparc/sparc-codegen.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 04a23c3..4283e6a 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -385,6 +385,15 @@ typedef struct { #define sparc_fop(ins,r1,op,r2,dest) sparc_encode_format3c((ins),2,(op),(r1),52,(r2),(dest)) #define sparc_fcmp(ins,r1,op,r2) sparc_encode_format3c((ins),2,(op),(r1),53,(r2),0) +/* fadd for a single has an op code of 65, double 66, quad 67 */ +#define sparc_fadds(ins, r1, op, r2, dest) sparc_fop( ins, r1, 65, r2, dest ) +#define sparc_faddd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 66, r2, dest ) +#define sparc_faddq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 67, r2, dest ) + +#define sparc_fsubs(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) +#define sparc_fsubd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) +#define sparc_fsubq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) + /* logical */ #define sparc_and(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|1,(dest)) #define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|1,(dest)) @@ -451,7 +460,6 @@ typedef struct { /* synthetic instructions */ #define sparc_cmp(ins,r1,r2) sparc_sub((ins),sparc_cc,(r1),(r2),sparc_g0) #define sparc_cmp_imm(ins,r1,imm) sparc_sub_imm((ins),sparc_cc,(r1),(imm),sparc_g0) - #define sparc_jmp(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_g0) #define sparc_jmp_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_g0) #define sparc_call(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_o7) -- cgit v1.1 From ed628ad0776db600fab8d5e4bcd6b563f5e808fd Mon Sep 17 00:00:00 2001 From: ct Date: Tue, 19 Aug 2003 03:04:34 +0000 Subject: added more asm macros for floating point subtraction of single/double/quad svn path=/trunk/mono/; revision=17394 --- sparc/sparc-codegen.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 4283e6a..d756cf6 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -391,8 +391,8 @@ typedef struct { #define sparc_faddq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 67, r2, dest ) #define sparc_fsubs(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) -#define sparc_fsubd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) -#define sparc_fsubq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) +#define sparc_fsubd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 70, r2, dest ) +#define sparc_fsubq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 71, r2, dest ) /* logical */ #define sparc_and(ins,setcc,r1,r2,dest) 
sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|1,(dest)) -- cgit v1.1 From 3d0f6d935e3a9c180d0bbb14fc371d40e53b7872 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 21 Aug 2003 15:23:31 +0000 Subject: 2003-08-21 Zoltan Varga * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). svn path=/trunk/mono/; revision=17470 --- ChangeLog | 3 +++ x86/tramp.c | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3128387..f553576 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2003-08-21 Zoltan Varga + + * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro diff --git a/x86/tramp.c b/x86/tramp.c index b6dfeb2..dab0d78 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -78,7 +78,6 @@ enum_calc_size: case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: - case MONO_TYPE_R4: case MONO_TYPE_SZARRAY: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: @@ -92,7 +91,7 @@ enum_calc_size: simpletype = sig->params [i]->data.klass->enum_basetype->type; goto enum_calc_size; } - if ((size = mono_class_value_size (sig->params [i]->data.klass, NULL)) != 4) { + if ((size = mono_class_native_size (sig->params [i]->data.klass, NULL)) != 4) { stack_size += size + 3; stack_size &= ~3; code_size += 32; @@ -106,6 +105,10 @@ enum_calc_size: stack_size += 8; code_size += i < 10 ? 5 : 8; break; + case MONO_TYPE_R4: + stack_size += 4; + code_size += i < 10 ? 10 : 13; + break; case MONO_TYPE_R8: stack_size += 8; code_size += i < 10 ? 7 : 10; @@ -180,7 +183,7 @@ enum_marshal: break; case MONO_TYPE_VALUETYPE: if (!sig->params [i - 1]->data.klass->enumtype) { - int size = mono_class_value_size (sig->params [i - 1]->data.klass, NULL); + int size = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); if (size == 4) { /* it's a structure that fits in 4 bytes, need to push the value pointed to */ x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); -- cgit v1.1 From 0fed0582997210e2a0ac71a527dbd319a85aebcb Mon Sep 17 00:00:00 2001 From: ct Date: Sun, 24 Aug 2003 22:49:45 +0000 Subject: completed the set of floating point ops svn path=/trunk/mono/; revision=17564 --- sparc/sparc-codegen.h | 118 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 90 insertions(+), 28 deletions(-) diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index d756cf6..b2a5836 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -153,28 +153,45 @@ typedef enum { typedef enum { /* fop1 format */ - sparc_fitos = 196, - sparc_fitod = 200, - sparc_fstoi = 209, - sparc_fdtoi = 210, - sparc_fstod = 201, - sparc_fdtos = 198, - sparc_fmov = 1, - sparc_fneg = 5, - sparc_fabs = 9, - sparc_fsqrts = 41, - sparc_fsqrtd = 42, - sparc_fadds = 65, - sparc_faddd = 66, - sparc_fsubs = 69, - sparc_fsubd = 70, - sparc_fmuls = 73, - sparc_fmuld = 74, - sparc_fdivs = 77, - sparc_fdivd = 78, + sparc_fitos_val = 196, + sparc_fitod_val = 200, + sparc_fitoq_val = 204, + sparc_fstoi_val = 209, + sparc_fdtoi_val = 210, + sparc_fqtoi_val = 211, + sparc_fstod_val = 201, + sparc_fstoq_val = 205, + sparc_fdtos_val = 198, + sparc_fdtoq_val = 206, + sparc_fqtos_val = 199, + sparc_fqtod_val = 203, + sparc_fmovs_val = 1, + sparc_fnegs_val = 5, + sparc_fabss_val = 9, + sparc_fsqrts_val = 41, + sparc_fsqrtd_val = 42, + sparc_fsqrtq_val = 43, + sparc_fadds_val = 65, + sparc_faddd_val = 66, + sparc_faddq_val = 67, + sparc_fsubs_val = 69, + sparc_fsubd_val = 70, + sparc_fsubq_val = 71, + sparc_fmuls_val = 73, + sparc_fmuld_val = 
74, + sparc_fmulq_val = 75, + sparc_fsmuld_val = 105, + sparc_fdmulq_val = 111, + sparc_fdivs_val = 77, + sparc_fdivd_val = 78, + sparc_fdivq_val = 79, /* fop2 format */ - sparc_fcmps = 81, - sparc_fcmpd = 82 + sparc_fcmps_val = 81, + sparc_fcmpd_val = 82, + sparc_fcmpq_val = 83, + sparc_fcmpes_val = 85, + sparc_fcmped_val = 86, + sparc_fcmpeq_val = 87 } SparcFOp; typedef struct { @@ -385,14 +402,59 @@ typedef struct { #define sparc_fop(ins,r1,op,r2,dest) sparc_encode_format3c((ins),2,(op),(r1),52,(r2),(dest)) #define sparc_fcmp(ins,r1,op,r2) sparc_encode_format3c((ins),2,(op),(r1),53,(r2),0) -/* fadd for a single has an op code of 65, double 66, quad 67 */ -#define sparc_fadds(ins, r1, op, r2, dest) sparc_fop( ins, r1, 65, r2, dest ) -#define sparc_faddd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 66, r2, dest ) -#define sparc_faddq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 67, r2, dest ) +/* format 1 fops */ +#define sparc_fadds(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fadds_val, r2, dest ) +#define sparc_faddd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddd_val, r2, dest ) +#define sparc_faddq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddq_val, r2, dest ) -#define sparc_fsubs(ins, r1, op, r2, dest) sparc_fop( ins, r1, 69, r2, dest ) -#define sparc_fsubd(ins, r1, op, r2, dest) sparc_fop( ins, r1, 70, r2, dest ) -#define sparc_fsubq(ins, r1, op, r2, dest) sparc_fop( ins, r1, 71, r2, dest ) +#define sparc_fsubs(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubs_val, r2, dest ) +#define sparc_fsubd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubd_val, r2, dest ) +b#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, dest ) + +#define sparc_fmuls( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuls_val, r2, dest ) +#define sparc_fmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuld_val, r2, dest ) +#define sparc_fmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmulq_val, r2, dest ) + +#define sparc_fsmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fsmuld_val, r2, dest ) +#define sparc_fdmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdmulq_val, r2, dest ) + +#define sparc_fdivs( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivs_val, r2, dest ) +#define sparc_fdivd( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivd_val, r2, dest ) +#define sparc_fdivq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivq_val, r2, dest ) + +#define sparc_fitos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitos_val, r2, dest ) +#define sparc_fitod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitod_val, r2, dest ) +#define sparc_fitoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitoq_val, r2, dest ) + +#define sparc_fstoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoi_val, r2, dest ) +#define sparc_fdtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoi_val, r2, dest ) +#define sparc_fqtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtoi_val, r2, dest ) + +#define sparc_fstod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstod_val, r2, dest ) +#define sparc_fstoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoq_val, r2, dest ) + +#define sparc_fdtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtos_val, r2, dest ) +#define sparc_fdtoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoq_val, r2, dest ) + +#define sparc_fqtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtos_val, r2, dest ) +#define sparc_fqtod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtod_val, r2, dest ) + +#define sparc_fmovs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fmovs_val, r2, dest ) +#define 
sparc_fnegs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegs_val, r2, dest ) +#define sparc_fabss( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabss_val, r2, dest ) + +#define sparc_fsqrts( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrts_val, r2, dest ) +#define sparc_fsqrtd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtd_val, r2, dest ) +#define sparc_fsqrtq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtq_val, r2, dest ) + +/* format 2 fops */ + +#define sparc_fcmps( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmps_val, r2 ) +#define sparc_fcmpd( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpd_val, r2 ) +#define sparc_fcmpq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpq_val, r2 ) +#define sparc_fcmpes( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpes_val, r2 ) +#define sparc_fcmped( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmped_val, r2 ) +#define sparc_fcmpeq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpeq_val, r2 ) /* logical */ #define sparc_and(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|1,(dest)) -- cgit v1.1 From 935c93eeaff3ad8ccee032ade3584a7f6ab8f4a1 Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Mon, 25 Aug 2003 13:38:19 +0000 Subject: .cvsignore update svn path=/trunk/mono/; revision=17581 --- alpha/.cvsignore | 4 ++++ s390/.cvsignore | 4 ++++ sparc/.cvsignore | 1 + 3 files changed, 9 insertions(+) create mode 100644 alpha/.cvsignore create mode 100644 s390/.cvsignore diff --git a/alpha/.cvsignore b/alpha/.cvsignore new file mode 100644 index 0000000..6358454 --- /dev/null +++ b/alpha/.cvsignore @@ -0,0 +1,4 @@ +Makefile.in +Makefile +.deps +.cvsignore diff --git a/s390/.cvsignore b/s390/.cvsignore new file mode 100644 index 0000000..6358454 --- /dev/null +++ b/s390/.cvsignore @@ -0,0 +1,4 @@ +Makefile.in +Makefile +.deps +.cvsignore diff --git a/sparc/.cvsignore b/sparc/.cvsignore index 282522d..051d1bd 100644 --- a/sparc/.cvsignore +++ b/sparc/.cvsignore @@ -1,2 +1,3 @@ Makefile Makefile.in +.deps -- cgit v1.1 From 6519bafeae686f3b32870a17dc1c84ae90ec95f9 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 3 Sep 2003 08:10:57 +0000 Subject: 2003-09-03 Zoltan Varga * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). svn path=/trunk/mono/; revision=17839 --- ChangeLog | 4 ++++ x86/tramp.c | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index f553576..32994c8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2003-09-03 Zoltan Varga + + * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). + 2003-08-21 Zoltan Varga * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). diff --git a/x86/tramp.c b/x86/tramp.c index dab0d78..3a0d50c 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -493,8 +493,15 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_VOID: break; case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 1); break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 2); + break; case MONO_TYPE_I4: case MONO_TYPE_U4: case MONO_TYPE_I: -- cgit v1.1 From 0b0945abf1e873f6a8dfb527236d8cce2ce15574 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Mon, 13 Oct 2003 22:38:25 +0000 Subject: 2003-10-13 Bernie Solomon * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation for V9 (64 bit), cover more 32 bit cases as well.
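One part of the V9 patch below deserves a worked expansion: a 64-bit pointer no longer fits the sethi/or pattern that sparc_set uses, so the new sparc_set_ptr macro builds the value in two 32-bit halves with sparc_g1 as scratch. Hand-expanded, with addr and code as illustrative locals and sparc_o0 as an arbitrary destination (a sketch of what the macro emits, not extra code from the patch):

guint32 top = ((guint64)addr) >> 32;            /* upper 32 bits */
guint32 bottom = ((guint64)addr) & 0xffffffff;  /* lower 32 bits */
sparc_set (code, top, sparc_g1);                /* sethi/or the high word into g1 */
sparc_set (code, bottom, sparc_o0);             /* sethi/or the low word into o0 */
sparc_sllx_imm (code, sparc_g1, 32, sparc_g1);  /* shift the high word into place */
sparc_or (code, FALSE, sparc_o0, sparc_g1, sparc_o0); /* combine the halves */

Worst case this is six instructions, against at most two for the 32-bit sparc_set.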
svn path=/trunk/mono/; revision=18995 --- ChangeLog | 5 + sparc/sparc-codegen.h | 102 ++++++++++- sparc/tramp.c | 494 ++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 483 insertions(+), 118 deletions(-) diff --git a/ChangeLog b/ChangeLog index 32994c8..c574ab4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2003-10-13 Bernie Solomon + + * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation + for V9 (64 bit), cover more 32 bit cases as well. + 2003-09-03 Zoltan Varga * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index b2a5836..01c1529 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -1,6 +1,12 @@ #ifndef __SPARC_CODEGEN_H__ #define __SPARC_CODEGEN_H__ +#if SIZEOF_VOID_P == 8 +#define SPARCV9 1 +#else +#define SPARCV9 0 +#endif + typedef enum { sparc_r0 = 0, sparc_r1 = 1, @@ -230,6 +236,17 @@ typedef struct { unsigned int op3 : 6; unsigned int rs1 : 5; unsigned int i : 1; + unsigned int x : 1; + unsigned int asi : 7; + unsigned int rs2 : 5; +} sparc_format3ax; + +typedef struct { + unsigned int op : 2; /* 2 or 3 */ + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int i : 1; unsigned int imm : 13; } sparc_format3b; @@ -238,6 +255,16 @@ typedef struct { unsigned int rd : 5; unsigned int op3 : 6; unsigned int rs1 : 5; + unsigned int i : 1; + unsigned int x : 1; + unsigned int imm : 12; +} sparc_format3bx; + +typedef struct { + unsigned int op : 2; /* 2 or 3 */ + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; unsigned int opf : 9; unsigned int rs2 : 5; } sparc_format3c; @@ -287,6 +314,20 @@ typedef struct { (ins) = (unsigned int*)__f + 1; \ } while (0) +#define sparc_encode_format3ax(ins,opval,asival,r1,r2,oper,dest) \ + do { \ + sparc_format3ax *__f = (sparc_format3ax*)(ins); \ + __f->op = (opval); \ + __f->asi = (asival); \ + __f->i = 0; \ + __f->x = 1; \ + __f->rd = (dest); \ + __f->rs1 = (r1); \ + __f->rs2 = (r2); \ + __f->op3 = (oper); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + #define sparc_encode_format3b(ins,opval,r1,val,oper,dest) \ do { \ sparc_format3b *__f = (sparc_format3b*)(ins); \ @@ -299,6 +340,19 @@ typedef struct { (ins) = (unsigned int*)__f + 1; \ } while (0) +#define sparc_encode_format3bx(ins,opval,r1,val,oper,dest) \ + do { \ + sparc_format3bx *__f = (sparc_format3bx*)(ins); \ + __f->op = (opval); \ + __f->imm = (val); \ + __f->i = 1; \ + __f->x = 1; \ + __f->rd = (dest); \ + __f->rs1 = (r1); \ + __f->op3 = (oper); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + #define sparc_encode_format3c(ins,opval,opfval,r1,oper,r2,dest) \ do { \ sparc_format3c *__f = (sparc_format3c*)(ins); \ @@ -330,6 +384,11 @@ typedef struct { #define sparc_ld(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),0,(dest)) #define sparc_ld_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),0,(dest)) +#if SPARCV9 +#define sparc_ldx(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),11,(dest)) +#define sparc_ldx_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),11,(dest)) +#endif + #define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest)) #define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest)) @@ -349,6 +408,11 @@ typedef struct { #define sparc_st(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),4,(src)) #define 
sparc_st_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),4,(src)) +#if SPARCV9 +#define sparc_stx(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),14,(src)) +#define sparc_stx_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),14,(src)) +#endif + #define sparc_std(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),7,(src)) #define sparc_std_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),7,(src)) @@ -409,7 +473,7 @@ typedef struct { #define sparc_fsubs(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubs_val, r2, dest ) #define sparc_fsubd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubd_val, r2, dest ) -b#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, dest ) +#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, dest ) #define sparc_fmuls( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuls_val, r2, dest ) #define sparc_fmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuld_val, r2, dest ) @@ -479,9 +543,19 @@ b#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, #define sparc_sll(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),37,(dest)) #define sparc_sll_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),37,(dest)) +#if SPARCV9 +#define sparc_sllx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),37,(dest)) +#define sparc_sllx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),37,(dest)) +#endif + #define sparc_srl(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),38,(dest)) #define sparc_srl_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),38,(dest)) +#if SPARCV9 +#define sparc_srlx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),38,(dest)) +#define sparc_srlx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),38,(dest)) +#endif + #define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest)) #define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest)) @@ -535,16 +609,30 @@ b#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, #define sparc_set(ins,val,reg) \ do { \ - if (((val) & 0x1fff) == 0) \ - sparc_sethi((ins),(val),(reg)); \ - else if (((val) >= -4096) && ((val) <= 4095)) \ - sparc_or_imm((ins),FALSE,sparc_g0,(val),(reg)); \ + if (((guint32)(val) & 0x1fff) == 0) \ + sparc_sethi((ins),(guint32)(val),(reg)); \ + else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \ + sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \ else { \ - sparc_sethi((ins),(val),(reg)); \ - sparc_or_imm((ins),FALSE,(reg),(val)&0x3ff,(reg)); \ + sparc_sethi((ins),(guint32)(val),(reg)); \ + sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \ } \ } while (0) +#if SPARCV9 +#define sparc_set_ptr(ins,ptr,reg) \ + do { \ + guint32 top_word = ((guint64)ptr) >> 32; \ + guint32 bottom_word = ((guint64)ptr) & 0xffffffff; \ + sparc_set((ins),top_word,sparc_g1); \ + sparc_set((ins),bottom_word,(reg)); \ + sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \ + sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \ + } while (0) +#else +#define sparc_set_ptr(ins,val,reg) sparc_set(ins,val,reg) +#endif + #define sparc_not(ins,reg) sparc_xnor((ins),FALSE,(reg),sparc_g0,(reg)) #define sparc_neg(ins,reg) sparc_sub((ins),FALSE,sparc_g0,(reg),(reg)) #define sparc_clr_reg(ins,reg) 
sparc_or((ins),FALSE,sparc_g0,sparc_g0,(reg)) diff --git a/sparc/tramp.c b/sparc/tramp.c index 82e6fed..7dae8e9 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -19,23 +19,40 @@ #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" #include "mono/metadata/debug-helpers.h" +#include "mono/metadata/marshal.h" #define ARG_SIZE sizeof (stackval) #define PROLOG_INS 1 -#define CALL_INS 2 /* Max 2. 1 for the jmpl and 1 for the nop */ +#define CALL_INS 3 /* Max 3. 1 for the jmpl and 1 for the nop and 1 for the possible unimp */ #define EPILOG_INS 2 -#define MINIMAL_STACK_SIZE 23 #define FLOAT_REGS 32 #define OUT_REGS 6 #define LOCAL_REGS 8 +#define SLOT_SIZE sizeof(gpointer) +#if SPARCV9 +#define MINIMAL_STACK_SIZE 22 +#define BIAS 2047 +#define FRAME_ALIGN 16 +#else +#define MINIMAL_STACK_SIZE 23 +#define BIAS 0 +#define FRAME_ALIGN 8 +#endif #define NOT_IMPL(x) g_error("FIXME: %s", x); /*#define DEBUG(a) a*/ #define DEBUG(a) /* Some assembly... */ +#ifdef __GNUC__ #define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory") +#else +static void flushi(void *addr) +{ + asm("flush %i0"); +} +#endif static char* sig_to_name (MonoMethodSignature *sig, const char *prefix) @@ -43,6 +60,7 @@ sig_to_name (MonoMethodSignature *sig, const char *prefix) int i; char *result; GString *res = g_string_new (""); + char *p; if (prefix) { g_string_append (res, prefix); g_string_append_c (res, '_'); @@ -56,12 +74,23 @@ sig_to_name (MonoMethodSignature *sig, const char *prefix) mono_type_get_desc (res, sig->params [i], TRUE); } result = res->str; + p = result; + /* remove chars Sun's assembler doesn't like */ + while (*p != '\0') { + if (*p == '.' || *p == '/') + *p = '_'; + else if (*p == '&') + *p = '$'; + else if (*p == '[' || *p == ']') + *p = 'X'; + p++; + } g_string_free (res, FALSE); return result; } static void -sparc_disassemble_code (guint32 *code_buffer, guint32 *p, char *id) +sparc_disassemble_code (guint32 *code_buffer, guint32 *p, const char *id) { guchar *cp; FILE *ofd; @@ -71,12 +100,21 @@ fprintf (ofd, "%s:\n", id); - for (cp = code_buffer; cp < p; cp++) + for (cp = (guchar *)code_buffer; cp < (guchar *)p; cp++) fprintf (ofd, ".byte %d\n", *cp); fclose (ofd); +#ifdef __GNUC__ system ("as /tmp/test.s -o /tmp/test.o;objdump -d /tmp/test.o"); +#else + /* this assumes we are using Sun tools as we aren't GCC */ +#if SPARCV9 + system ("as -xarch=v9 /tmp/test.s -o /tmp/test.o;dis /tmp/test.o"); +#else + system ("as /tmp/test.s -o /tmp/test.o;dis /tmp/test.o"); +#endif +#endif } @@ -85,14 +123,14 @@ add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) { if (simple) { if (*gr >= OUT_REGS) { - *stack_size += 4; + *stack_size += SLOT_SIZE; *code_size += 12; } else { *code_size += 4; } } else { if (*gr >= OUT_REGS - 1) { - *stack_size += 8 + (*stack_size % 8); + *stack_size += 8 + (*stack_size % 8); /* ???64 */ *code_size += 16; } else { *code_size += 16; @@ -110,7 +148,7 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, guint32 simpletype; fr = gr = 0; - *stack_size = MINIMAL_STACK_SIZE * 4; + *stack_size = MINIMAL_STACK_SIZE * SLOT_SIZE; *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS) * 4; /* function arguments */ @@ -125,6 +163,10 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, simpletype = sig->params[i]->type; enum_calc_size: switch (simpletype) { + case MONO_TYPE_R4: +#if SPARCV9 + (*code_size) += 4; /* for the fdtos */ +#endif case
MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: @@ -139,7 +181,6 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: - case MONO_TYPE_R4: case MONO_TYPE_SZARRAY: add_general (&gr, stack_size, code_size, TRUE); break; @@ -161,7 +202,11 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, } } else { add_general (&gr, stack_size, code_size, TRUE); +#if SPARCV9 + *code_size += 8; +#else *code_size += 4; +#endif } break; } @@ -204,13 +249,23 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, case MONO_TYPE_I8: *code_size += 12; break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; if (sig->ret->data.klass->enumtype) { simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } + size = mono_class_native_size (sig->ret->data.klass, NULL); +#if SPARCV9 + if (size <= 32) + *code_size += 8 + (size + 7) / 2; + else + *code_size += 8; +#else *code_size += 8; +#endif break; + } case MONO_TYPE_VOID: break; default: @@ -222,22 +277,28 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, *stack_size += 8; *code_size += 24; if (sig->hasthis) { - *stack_size += 4; + *stack_size += SLOT_SIZE; *code_size += 4; } } - *stack_size = (*stack_size + 7) & (~7); + *stack_size = (*stack_size + (FRAME_ALIGN - 1)) & (~(FRAME_ALIGN -1)); } static inline guint32 * emit_epilog (guint32 *p, MonoMethodSignature *sig, guint stack_size) { + int ret_offset = 8; + /* * Standard epilog. * 8 may be 12 when returning structures (to skip unimp opcode). */ - sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero); +#if !SPARCV9 + if (sig != NULL && !sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) + ret_offset = 12; +#endif + sparc_jmpl_imm (p, sparc_i7, ret_offset, sparc_zero); sparc_restore (p, sparc_zero, sparc_zero, sparc_zero); return p; @@ -251,28 +312,73 @@ emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size) return p; } +#if SPARCV9 +#define sparc_st_ptr(a,b,c,d) sparc_stx(a,b,c,d) +#define sparc_st_imm_ptr(a,b,c,d) sparc_stx_imm(a,b,c,d) +#define sparc_ld_ptr(a,b,c,d) sparc_ldx(a,b,c,d) +#define sparc_ld_imm_ptr(a,b,c,d) sparc_ldx_imm(a,b,c,d) +#else +#define sparc_st_ptr(a,b,c,d) sparc_st(a,b,c,d) +#define sparc_st_imm_ptr(a,b,c,d) sparc_st_imm(a,b,c,d) +#define sparc_ld_ptr(a,b,c,d) sparc_ld(a,b,c,d) +#define sparc_ld_imm_ptr(a,b,c,d) sparc_ld_imm(a,b,c,d) +#endif + +/* synonyms for when values are really widened scalar values */ +#define sparc_st_imm_word sparc_st_imm_ptr + #define ARG_BASE sparc_i3 /* pointer to args in i3 */ -#define SAVE_4_IN_GENERIC_REGISTER \ +#define SAVE_PTR_IN_GENERIC_REGISTER \ if (gr < OUT_REGS) { \ - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \ + sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \ gr++; \ } else { \ - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ - sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); \ - stack_par_pos += 4; \ + sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ + sparc_st_imm_ptr (p, sparc_l0, sparc_sp, stack_par_pos); \ + stack_par_pos += SLOT_SIZE; \ } -#define SAVE_4_VAL_IN_GENERIC_REGISTER \ - if (gr < OUT_REGS) { \ - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ - sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr); \ - gr++; \ - } else { \ - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \ - sparc_ld_imm (p, sparc_l0, 0, sparc_l0); \ 
- sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); \ - stack_par_pos += 4; \ - } +#if SPARCV9 +/* This is a half hearted attempt at coping with structs by value - the + actual convention is complicated when floats & doubles are involved as + you end up with fields in different registers on/off the stack. + It will take more time to get right... */ +static guint32 * +v9_struct_arg(guint32 *p, int arg_index, MonoClass *klass, int size, guint *p_gr) +{ + MonoMarshalType *info = mono_marshal_load_type_info (klass); + int off = 0; + int index = 0; + guint gr = *p_gr; + sparc_ld_imm_ptr (p, ARG_BASE, arg_index*ARG_SIZE, sparc_l0); + if (size > 8) { + if (info->fields [index].field->type->type == MONO_TYPE_R8) { + sparc_lddf_imm (p, sparc_l0, 0, sparc_f0 + 2 * gr); + index++; + } + else { + sparc_ldx_imm (p, sparc_l0, 0, sparc_o0 + gr); + index++; /* FIXME could be multiple fields in one register */ + } + gr++; + size -= 8; + off = 8; + } + if (size > 0) { + if (info->fields [index].field->type->type == MONO_TYPE_R8) { + sparc_lddf_imm (p, sparc_l0, off, sparc_f0 + 2 * gr); + index++; + } + else { + /* will load extra garbage off end of short structs ... */ + sparc_ldx_imm (p, sparc_l0, off, sparc_o0 + gr); + } + gr++; + } + *p_gr = gr; + return p; +} +#endif static inline guint32* emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, @@ -282,7 +388,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, guint32 simpletype; fr = gr = 0; - stack_par_pos = MINIMAL_STACK_SIZE * 4; + stack_par_pos = MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS; if (sig->hasthis) { if (use_memcpy) { @@ -305,12 +411,12 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (size != 4) { /* need to call memcpy here */ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); - sparc_ld_imm (p, sparc_i3, i*16, sparc_o1); + sparc_ld_imm_ptr (p, sparc_i3, i*16, sparc_o1); sparc_set (p, (guint32)size, sparc_o2); - sparc_set (p, (guint32)memmove, sparc_l0); + sparc_set_ptr (p, (void *)memmove, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); - stack_par_pos += (size + 3) & (~3); + stack_par_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1)); } } } @@ -322,13 +428,14 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, gint size = mono_class_native_size (klass, NULL); DEBUG(fprintf(stderr, "retval value type size: %d\n", size)); - if (size > 8) { - sparc_ld_imm (p, sparc_sp, stack_size - 12, - sparc_o0); - sparc_ld_imm (p, sparc_o0, 0, sparc_o0); - gr ++; - } else { - g_error ("FIXME: size <= 8 not implemented"); +#if SPARCV9 + if (size > 32) { +#else + { +#endif + /* pass on buffer in interp.c to called function */ + sparc_ld_imm_ptr (p, sparc_i1, 0, sparc_l0); + sparc_st_imm_ptr (p, sparc_l0, sparc_sp, 64); } } } @@ -337,7 +444,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, for (i = 0; i < sig->param_count; i++) { if (sig->params[i]->byref) { - SAVE_4_IN_GENERIC_REGISTER; + SAVE_PTR_IN_GENERIC_REGISTER; continue; } simpletype = sig->params[i]->type; @@ -351,7 +458,25 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: +#if !SPARCV9 case MONO_TYPE_R4: +#endif + if (gr < OUT_REGS) { + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); + gr++; + } else { + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); + 
stack_par_pos += SLOT_SIZE; + } + break; +#if SPARCV9 + case MONO_TYPE_R4: + sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f30); /* fix using this fixed reg */ + sparc_fdtos(p, sparc_f30, sparc_f0 + 2 * gr + 1); + gr++; + break; +#endif case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: @@ -359,35 +484,72 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: - SAVE_4_IN_GENERIC_REGISTER; + SAVE_PTR_IN_GENERIC_REGISTER; break; case MONO_TYPE_VALUETYPE: { gint size; - if (sig->params[i]->data.klass->enumtype) { - simpletype = sig->params[i]->data.klass->enum_basetype->type; + MonoClass *klass = sig->params[i]->data.klass; + if (klass->enumtype) { + simpletype = klass->enum_basetype->type; goto enum_calc_size; } - size = mono_class_native_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (klass, NULL); +#if SPARCV9 + if (size <= 16) { + if (gr < OUT_REGS) { + p = v9_struct_arg(p, i, klass, size, &gr); + } else { + sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_ld_imm (p, sparc_l0, 0, sparc_l0); + sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; + } +#else if (size == 4) { - SAVE_4_VAL_IN_GENERIC_REGISTER; + if (gr < OUT_REGS) { + sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr); + gr++; + } else { + sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_ld_imm (p, sparc_l0, 0, sparc_l0); + sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; + } +#endif } else { if (gr < OUT_REGS) { sparc_add_imm (p, 0, sparc_sp, cur_struct_pos, sparc_o0 + gr); gr ++; } else { - sparc_ld_imm (p, sparc_sp, + sparc_ld_imm_ptr (p, sparc_sp, cur_struct_pos, sparc_l1); - sparc_st_imm (p, sparc_l1, + sparc_st_imm_ptr (p, sparc_l1, sparc_sp, stack_par_pos); } - cur_struct_pos += (size + 3) & (~3); + cur_struct_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1)); } break; } +#if SPARCV9 + case MONO_TYPE_I8: + if (gr < OUT_REGS) { + sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); + gr++; + } else { + sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_stx_imm (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; + } + break; + case MONO_TYPE_R8: + sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0 + 2 * i); + break; +#else case MONO_TYPE_I8: case MONO_TYPE_R8: /* this will break in subtle ways... 
*/ @@ -408,6 +570,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, NOT_IMPL("FIXME: I8/R8 on stack"); } break; +#endif default: g_error ("Can't trampoline 0x%x", sig->params[i]->type); } @@ -435,10 +598,16 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, /* call "callme" */ sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite); sparc_nop (p); +#if !SPARCV9 + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) { + int size = mono_class_native_size (sig->ret->data.klass, NULL); + sparc_unimp (p, size & 4095); + } +#endif /* get return value */ if (sig->ret->byref || string_ctor) { - sparc_st (p, sparc_o0, sparc_i1, 0); + sparc_st_ptr (p, sparc_o0, sparc_i1, 0); } else { simpletype = sig->ret->type; enum_retval: @@ -455,6 +624,8 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, break; case MONO_TYPE_I4: case MONO_TYPE_U4: + sparc_st (p, sparc_o0, sparc_i1, 0); + break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_CLASS: @@ -463,7 +634,7 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: case MONO_TYPE_PTR: - sparc_st (p, sparc_o0, sparc_i1, 0); + sparc_st_ptr (p, sparc_o0, sparc_i1, 0); break; case MONO_TYPE_R4: sparc_stf (p, sparc_f0, sparc_i1, 0); @@ -472,13 +643,51 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, sparc_stdf (p, sparc_f0, sparc_i1, 0); break; case MONO_TYPE_I8: +#if SPARCV9 + sparc_stx (p, sparc_o0, sparc_i1, 0); +#else sparc_std (p, sparc_o0, sparc_i1, 0); +#endif break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; if (sig->ret->data.klass->enumtype) { simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retval; } +#if SPARCV9 + size = mono_class_native_size (sig->ret->data.klass, NULL); + if (size <= 32) { + int n_regs = size / 8; + int j; + sparc_ldx_imm (p, sparc_i1, 0, sparc_i1); + /* wrong if there are floating values in the struct... */ + for (j = 0; j < n_regs; j++) { + sparc_stx_imm (p, sparc_o0 + j, sparc_i1, j * 8); + } + size -= n_regs * 8; + if (size > 0) { + int last_reg = sparc_o0 + n_regs; + /* get value right aligned in register */ + sparc_srlx_imm(p, last_reg, 64 - 8 * size, last_reg); + if ((size & 1) != 0) { + sparc_stb_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 1); + size--; + if (size > 0) + sparc_srlx_imm(p, last_reg, 8, last_reg); + } + if ((size & 2) != 0) { + sparc_sth_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 2); + size -= 2; + if (size > 0) + sparc_srlx_imm(p, last_reg, 16, last_reg); + } + if ((size & 4) != 0) + sparc_st_imm (p, last_reg, sparc_i1, n_regs * 8); + } + } +#endif + } case MONO_TYPE_VOID: break; default: @@ -511,9 +720,12 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) p = emit_prolog (p, sig, stack_size); p = emit_save_parameters (p, sig, stack_size, use_memcpy); p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); - p = emit_epilog (p, sig, stack_size); + /* we don't return structs here so pass in NULL as signature */ + p = emit_epilog (p, NULL, stack_size); + + g_assert(p <= code_buffer + (code_size / 4)); - //sparc_disassemble_code (code_buffer, p, sig_to_name(sig, NULL)); + DEBUG(sparc_disassemble_code (code_buffer, p, sig_to_name(sig, NULL))); /* So here's the deal... 
* UltraSPARC will flush a whole cache line at a time @@ -529,15 +741,16 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) return (MonoPIFunc)code_buffer; } -#define MINV_POS (MINIMAL_STACK_SIZE * 4) +#define MINV_POS (MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS) + void * mono_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; guint stack_size, code_size, stackval_arg_pos, local_pos; - guint i, local_start, reg_param, stack_param, this_flag, cpos, vt_cur; - guint align = 0; + guint i, local_start, reg_param = 0, stack_param, cpos, vt_cur; + guint32 align = 0; guint32 *p, *code_buffer; gint *vtbuf; gint32 simpletype; @@ -552,7 +765,7 @@ mono_create_method_pointer (MonoMethod *method) return method->addr; } - code_size = 1024; + code_size = 1024; /* these should be calculated... */ stack_size = 1024; stack_param = 0; @@ -566,39 +779,33 @@ mono_create_method_pointer (MonoMethod *method) p = emit_prolog (p, sig, stack_size); /* fill MonoInvocation */ - sparc_st_imm (p, sparc_g0, sparc_sp, + sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); - sparc_st_imm (p, sparc_g0, sparc_sp, + sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); - sparc_st_imm (p, sparc_g0, sparc_sp, + sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child))); - sparc_st_imm (p, sparc_g0, sparc_sp, + sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); - sparc_set (p, (guint32)method, sparc_l0); - sparc_st_imm (p, sparc_l0, sparc_sp, + sparc_set_ptr (p, (void *)method, sparc_l0); + sparc_st_imm_ptr (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); - local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + - (sig->param_count + 1) * sizeof (stackval); + stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); + local_start = local_pos = stackval_arg_pos + (sig->param_count + 1) * sizeof (stackval); if (sig->hasthis) { - sparc_st_imm (p, sparc_i0, sparc_sp, + sparc_st_imm_ptr (p, sparc_i0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); reg_param = 1; - } else if (sig->param_count) { - sparc_st_imm (p, sparc_i0, sparc_sp, local_pos); - local_pos += 4; - reg_param = 0; - } - - this_flag = (sig->hasthis ? 
1 : 0); + } if (sig->param_count) { - gint save_count = MIN (OUT_REGS, sig->param_count - 1); + gint save_count = MIN (OUT_REGS, sig->param_count + sig->hasthis); for (i = reg_param; i < save_count; i++) { - sparc_st_imm (p, sparc_i1 + i, sparc_sp, local_pos); - local_pos += 4; + sparc_st_imm_ptr (p, sparc_i0 + i, sparc_sp, local_pos); + local_pos += SLOT_SIZE; } } @@ -609,7 +816,7 @@ mono_create_method_pointer (MonoMethod *method) for (i = 0; i < sig->param_count; i++) { MonoType *type = sig->params [i]; vtbuf [i] = -1; - if (type->type == MONO_TYPE_VALUETYPE) { + if (!sig->params[i]->byref && type->type == MONO_TYPE_VALUETYPE) { MonoClass *klass = type->data.klass; gint size; @@ -622,77 +829,120 @@ mono_create_method_pointer (MonoMethod *method) cpos += size; } } - cpos += 3; - cpos &= ~3; + cpos += SLOT_SIZE - 1; + cpos &= ~(SLOT_SIZE - 1); local_pos += cpos; /* set MonoInvocation::stack_args */ - stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); - sparc_st_imm (p, sparc_l0, sparc_sp, + sparc_st_imm_ptr (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); /* add stackval arguments */ - /* something is bizzare here... */ for (i=0; i < sig->param_count; i++) { + int stack_offset; + int type; if (reg_param < OUT_REGS) { - sparc_add_imm (p, 0, sparc_sp, - local_start + (reg_param - this_flag)*4, - sparc_o2); + stack_offset = local_start + i * SLOT_SIZE; reg_param++; } else { - sparc_add_imm (p, 0, sparc_sp, - stack_size + 8 + stack_param, sparc_o2); + stack_offset = stack_size + 8 + stack_param; stack_param++; } + if (!sig->params[i]->byref) { + type = sig->params[i]->type; + enum_arg: + switch (type) { + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_PTR: + case MONO_TYPE_R8: + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + stack_offset += SLOT_SIZE - 4; + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + stack_offset += SLOT_SIZE - 2; + break; + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_BOOLEAN: + stack_offset += SLOT_SIZE - 1; + break; + case MONO_TYPE_VALUETYPE: + if (sig->params[i]->data.klass->enumtype) { + type = sig->params[i]->data.klass->enum_basetype->type; + goto enum_arg; + } + g_assert(vtbuf[i] >= 0); + break; + default: + g_error ("can not cope with delegate arg type %d", type); + } + } + + sparc_add_imm (p, 0, sparc_sp, stack_offset, sparc_o2); + if (vtbuf[i] >= 0) { sparc_add_imm (p, 0, sparc_sp, vt_cur, sparc_o1); - sparc_st_imm (p, sparc_o1, sparc_sp, stackval_arg_pos); + sparc_st_imm_ptr (p, sparc_o1, sparc_sp, stackval_arg_pos); sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_o1); - //sparc_ld (p, sparc_o2, 0, sparc_o2); + sparc_ld_imm_ptr (p, sparc_o2, 0, sparc_o2); vt_cur += vtbuf[i]; } else { sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_o1); } - sparc_set (p, (guint32)sig->params[i], sparc_o0); + sparc_set_ptr (p, (void *)sig->params[i], sparc_o0); sparc_set (p, (guint32)sig->pinvoke, sparc_o3); /* YOU make the CALL! 
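(the call in question is stackval_from_data; reading the register setup above: %o0 = the parameter's MonoType, %o1 = the destination stackval, %o2 = the address of the raw argument, %o3 = the pinvoke flag)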
*/ - sparc_set (p, (guint32)stackval_from_data, sparc_l0); + sparc_set_ptr (p, (void *)stackval_from_data, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); - - if (sig->pinvoke) - stackval_arg_pos += 4 * - mono_type_native_stack_size (sig->params[i], - &align); - else - stackval_arg_pos += 4 * - mono_type_stack_size (sig->params[i], &align); + stackval_arg_pos += sizeof(stackval); } /* return value storage */ if (sig->param_count) { sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); } + if (!sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) { +#if !SPARCV9 + /* pass on callers buffer */ + sparc_ld_imm_ptr (p, sparc_fp, 64, sparc_l1); + sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0); +#else + sparc_add_imm (p, 0, sparc_l0, sizeof(stackval), sparc_l1); + sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0); +#endif + } - sparc_st_imm (p, sparc_l0, sparc_sp, + sparc_st_imm_ptr (p, sparc_l0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); /* call ves_exec_method */ sparc_add_imm (p, 0, sparc_sp, MINV_POS, sparc_o0); - sparc_set (p, (guint32)ves_exec_method, sparc_l0); + sparc_set_ptr (p, (void *)ves_exec_method, sparc_l0); sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite); sparc_nop (p); /* move retval from stackval to proper place (r3/r4/...) */ if (sig->ret->byref) { - sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0 ); + sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0 ); } else { enum_retvalue: switch (sig->ret->type) { @@ -701,38 +951,60 @@ mono_create_method_pointer (MonoMethod *method) case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - sparc_ldub_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); - break; case MONO_TYPE_I2: case MONO_TYPE_U2: - sparc_lduh_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); - break; case MONO_TYPE_I4: case MONO_TYPE_U4: + sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); + break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: - sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); + sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0); break; case MONO_TYPE_I8: + case MONO_TYPE_U8: +#if SPARCV9 + sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); +#else sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0); sparc_ld_imm (p, sparc_sp, stackval_arg_pos + 4, sparc_i1); +#endif break; case MONO_TYPE_R4: - sparc_ldf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0); + sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0); + sparc_fdtos(p, sparc_f0, sparc_f0); break; case MONO_TYPE_R8: sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0); break; - case MONO_TYPE_VALUETYPE: + case MONO_TYPE_VALUETYPE: { + gint size; + gint reg = sparc_i0; if (sig->ret->data.klass->enumtype) { simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; } - NOT_IMPL("value type as ret val from delegate"); +#if SPARCV9 + size = mono_class_native_size (sig->ret->data.klass, NULL); + sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_l0); + if (size <= 16) { + gint off = 0; + if (size >= 8) { + sparc_ldx_imm (p, sparc_l0, 0, reg); + size -= 8; + off += 8; + reg++; + } + if (size > 0) + sparc_ldx_imm (p, sparc_l0, off, reg); + } else + NOT_IMPL("value type as ret val from delegate"); +#endif break; + } default: g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); @@ -752,7 +1024,7 @@ mono_create_method_pointer (MonoMethod *method) mono_jit_info_table_add 
(mono_root_domain, ji); - sparc_disassemble_code (code_buffer, p, method->name); + DEBUG(sparc_disassemble_code (code_buffer, p, method->name)); DEBUG(fprintf(stderr, "Delegate [end emiting] %s\n", method->name)); -- cgit v1.1 From fa30eb232e53c9e39eec1bd44189e8ac29ba1644 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Mon, 13 Oct 2003 22:48:11 +0000 Subject: 2003-10-13 Bernie Solomon * hppa/tramp.c: add initial implementation - this is 64 bit only hppa/Makefile.am hppa/.cvsignore: added svn path=/trunk/mono/; revision=18996 --- ChangeLog | 5 + hppa/.cvsignore | 3 + hppa/Makefile.am | 7 + hppa/tramp.c | 980 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 995 insertions(+) create mode 100644 hppa/.cvsignore create mode 100644 hppa/Makefile.am create mode 100644 hppa/tramp.c diff --git a/ChangeLog b/ChangeLog index c574ab4..892b9b3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,10 @@ 2003-10-13 Bernie Solomon + * hppa/tramp.c: add initial implementation - this is 64 bit only + hppa/Makefile.am hppa/.cvsignore: added + +2003-10-13 Bernie Solomon + * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation for V9 (64 bit), cover more 32 bit cases as well. diff --git a/hppa/.cvsignore b/hppa/.cvsignore new file mode 100644 index 0000000..051d1bd --- /dev/null +++ b/hppa/.cvsignore @@ -0,0 +1,3 @@ +Makefile +Makefile.in +.deps diff --git a/hppa/Makefile.am b/hppa/Makefile.am new file mode 100644 index 0000000..a867bcd --- /dev/null +++ b/hppa/Makefile.am @@ -0,0 +1,7 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-hppa.la + +libmonoarch_hppa_la_SOURCES = tramp.c + diff --git a/hppa/tramp.c b/hppa/tramp.c new file mode 100644 index 0000000..8302a14 --- /dev/null +++ b/hppa/tramp.c @@ -0,0 +1,980 @@ +/* + Copyright (c) 2003 Bernie Solomon + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + Trampoline generation for HPPA - currently (Oct 9th 2003) only + supports 64 bits - and the HP compiler. 
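Instruction words are assembled by hand with the macros below. One note on the displacement handling: as I read the PA-RISC low-sign immediate encoding, the magnitude is shifted left one bit and the sign sits in the low bit of the field, which is why every load/store macro takes an explicit neg flag alongside disp.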
+*/ +#include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" +#include "mono/metadata/tabledefs.h" + +#if SIZEOF_VOID_P != 8 +#error "HPPA code only currently supports 64bit pointers" +#endif + +// debugging flag which dumps code generated +static int debug_asm = 0; + +#define NOP 0x08000240 + +#define LDB(disp, base, dest, neg) (0x40000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) +#define STB(src, disp, base, neg) (0x60000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) + +#define LDH(disp, base, dest, neg) (0x44000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) +#define STH(src, disp, base, neg) (0x64000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) + +#define LDW(disp, base, dest, neg) (0x48000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) +#define STW(src, disp, base, neg) (0x68000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) + +#define COPY(src, dest) (0x34000000 | ((src) << 21) | ((dest) << 16)) +#define LDD(im10a, base, dest, m, a, neg) (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) +#define STD(src, im10a, base, m , a, neg) (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) + +#define FLDD(im10a, base, dest, m, a, neg) (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) +#define FSTD(src, im10a, base, m , a, neg) (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) + +#define FLDW(im11a, base, dest, r, neg) (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)) +#define FSTW(src, im11a, base, r, neg) (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)) + +/* only works on right half SP registers */ +#define FCNV(src, ssng, dest, dsng) (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)) + +#define LDIL(im21, dest) (0x20000000 | im21 | ((dest) << 21)) + +#define LDO(off, base, dest, neg) (0x34000000 | (((off) & 0x1fff)) << 1 | ((base) << 21) | ((dest) << 16) | neg) + +#define EXTRDU(src, pos, len, dest) (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ? 0x1000 : 0) | (32 - (len & 31))) + +#define BVE(reg, link) (0xE8001000 | ((link ? 
7 : 6) << 13) | ((reg) << 21)) + +static unsigned int gen_copy(int src, int dest) +{ + if (debug_asm) + fprintf(stderr, "COPY %d,%d\n", src, dest); + return COPY(src, dest); +} + +static unsigned int gen_ldb(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "LDB %d(%d),%d\n", disp, base, dest); + return LDB(disp, base, dest, neg); +} + +static unsigned int gen_stb(int src, int disp, int base) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "STB %d,%d(%d)\n", src, disp, base); + return STB(src, disp, base, neg); +} + +static unsigned int gen_ldh(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "LDH %d(%d),%d\n", disp, base, dest); + g_assert((disp & 1) == 0); + return LDH(disp, base, dest, neg); +} + +static unsigned int gen_sth(int src, int disp, int base) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "STH %d,%d(%d)\n", src, disp, base); + g_assert((disp & 1) == 0); + return STH(src, disp, base, neg); +} + +static unsigned int gen_ldw(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "LDW %d(%d),%d\n", disp, base, dest); + g_assert((disp & 3) == 0); + return LDW(disp, base, dest, neg); +} + +static unsigned int gen_stw(int src, int disp, int base) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "STW %d,%d(%d)\n", src, disp, base); + g_assert((disp & 3) == 0); + return STW(src, disp, base, neg); +} + +static unsigned int gen_ldd(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "LDD %d(%d),%d\n", disp, base, dest); + g_assert((disp & 7) == 0); + return LDD(disp >> 3, base, dest, 0, 0, neg); +} + +static unsigned int gen_lddmb(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "LDD,MB %d(%d),%d\n", disp, base, dest); + g_assert((disp & 7) == 0); + return LDD(disp >> 3, base, dest, 1, 1, neg); +} + +static unsigned int gen_std(int src, int disp, int base) +{ + int neg = disp < 0; + g_assert((disp & 7) == 0); + if (debug_asm) + fprintf(stderr, "STD %d,%d(%d)\n", src, disp, base); + return STD(src, disp >> 3, base, 0, 0, neg); +} + +static unsigned int gen_fldd(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "FLDD %d(%d),%d\n", disp, base, dest); + g_assert((disp & 7) == 0); + return FLDD(disp >> 3, base, dest, 0, 0, neg); +} + +static unsigned int gen_fstd(int src, int disp, int base) +{ + int neg = disp < 0; + g_assert((disp & 7) == 0); + if (debug_asm) + fprintf(stderr, "FSTD %d,%d(%d)\n", src, disp, base); + return FSTD(src, disp >> 3, base, 0, 0, neg); +} + +static unsigned int gen_fldw(int disp, int base, int dest) +{ + int neg = disp < 0; + if (debug_asm) + fprintf(stderr, "FLDW %d(%d),%dr\n", disp, base, dest); + g_assert((disp & 3) == 0); + return FLDW(disp >> 2, base, dest, 1, neg); +} + +static unsigned int gen_fstw(int src, int disp, int base) +{ + int neg = disp < 0; + g_assert((disp & 3) == 0); + if (debug_asm) + fprintf(stderr, "FSTW %dr,%d(%d)\n", src, disp, base); + return FSTW(src, disp >> 2, base, 1, neg); +} + +static unsigned int gen_fcnv_dbl_sng(int src, int dest) +{ + if (debug_asm) + fprintf(stderr, "FCNV,DBL,SGL %d,%dr\n", src, dest); + return FCNV(src, 0, dest, 1); +} + +static unsigned int gen_fcnv_sng_dbl(int src, int dest) +{ + if (debug_asm) + fprintf(stderr, "FCNV,SGL,DBL %dr,%d\n", src, dest); + return FCNV(src, 1, dest, 0); +} + +static unsigned int gen_stdma(int src, int disp, int base) 
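/* ",MA" = modify after: if I read the completer right, this stores src at the current base address and only then adds disp to the base - the classic one-instruction HPPA frame push, used below as gen_stdma(3, frame_size, 30); gen_lddmb (modify before) undoes it in the epilogue */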
+{ + int neg = disp < 0; + g_assert((disp & 7) == 0); + if (debug_asm) + fprintf(stderr, "STD,MA %d,%d(%d)\n", src, disp, base); + return STD(src, disp >> 3, base, 1, 0, neg); +} + +/* load top 21 bits of val into reg */ +static unsigned int gen_ldil(unsigned int val, int reg) +{ + unsigned int t = (val >> 11) & 0x1fffff; + unsigned int im21 = ((t & 0x7c) << 14) | ((t & 0x180) << 7) | ((t & 0x3) << 12) | ((t & 0xffe00) >> 8) | ((t & 0x100000) >> 20); + return LDIL(reg, im21); +} + +static unsigned int gen_ldo(int off, int base, int reg) +{ + int neg = off < 0; + if (debug_asm) + fprintf(stderr, "LDO %d(%d),%d\n", off, base, reg); + return LDO(off, base, reg, neg); +} + +static unsigned int gen_nop(void) +{ + if (debug_asm) + fprintf(stderr, "NOP\n"); + return NOP; +} + +static unsigned int gen_bve(int reg, int link) +{ + if (debug_asm) + fprintf(stderr, "BVE%s (%d)%s\n", link ? ",L" : "", reg, link ? ",2" : ""); + return BVE(reg, link); +} + +static unsigned int gen_extrdu(int src, int pos, int len, int dest) +{ + if (debug_asm) + fprintf(stderr, "EXTRD,U %d,%d,%d,%d\n", src, pos, len, dest); + return EXTRDU(src, pos, len, dest); +} + +static void flush_cache(void *address, int length) +{ +#ifdef __GNUC__ +#error "currently only supports the HP C compiler" +#else + int cache_line_size = 16; + ulong_t end = (ulong_t)address + length; + register ulong_t sid; + register ulong_t offset = (ulong_t) address; + register ulong_t r0 = 0; + + _asm("LDSID", 0, offset, sid); + _asm("MTSP", sid, 0); + _asm("FDC", r0, 0, offset); + offset = (offset + (cache_line_size - 1)) & ~(cache_line_size - 1); + while (offset < end) { + (void)_asm("FDC", r0, 0, offset); + offset += cache_line_size; + } + _asm("SYNC"); + offset = (ulong_t) address; + _asm("FIC", r0, 0, offset); + offset = (offset + (cache_line_size - 1)) & ~(cache_line_size - 1); + while (offset < end) { + (void)_asm("FIC", r0, 0, offset); + offset += cache_line_size; + } + _asm("SYNC"); + // sync needs at least 7 instructions after it... this is what is used for NOP + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); + _asm("OR", 0, 0, 0); +#endif +} + +#define ADD_INST(code, pc, gen_exp) ((code) == NULL ? 
(pc)++ : (code[(pc)++] = (gen_exp))) + +/* + * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); + */ + +MonoPIFunc +mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +{ + int pc, save_pc; + int param; + void **descriptor; + unsigned int *code = NULL; + int arg_reg; +#define FP_ARG_REG(r) (4 + (26 - arg_reg)) + int arg_offset; + int frame_size = 0; + int spill_offset; + int parameter_offset; + int parameter_slot; + int args_on_stack; + + if (debug_asm) { + fprintf(stderr, "trampoline: # params %d has this %d exp this %d string %d, ret type %d\n", + sig->param_count, sig->hasthis, sig->explicit_this, string_ctor, sig->ret->type); + } + + // everything takes 8 bytes unless it is a bigger struct + for (param = 0; param < sig->param_count; param++) { + if (sig->params[param]->byref) + frame_size += 8; + else { + if (sig->params[param]->type != MONO_TYPE_VALUETYPE) + frame_size += 8; + else { + if (sig->params [param]->data.klass->enumtype) + frame_size += 8; + else { + frame_size += 15; // large structs are 16 byte aligned + frame_size &= ~15; + frame_size += mono_class_native_size (sig->params [param]->data.klass, NULL); + frame_size += 7; + frame_size &= ~7; + } + } + } + } + + if (sig->hasthis) + frame_size += 8; + // 16 byte alignment + if ((frame_size & 15) != 0) + frame_size += 8; + // minimum is 64 bytes + if (frame_size < 64) + frame_size = 64; + + if (debug_asm) + fprintf(stderr, "outgoing frame size: %d\n", frame_size); + + frame_size += 16; // for the frame marker (called routines stuff return address etc. here) + frame_size += 32; // spill area for r4, r5 and r27 (16 byte aligned) + + spill_offset = -frame_size; + parameter_offset = spill_offset + 32; // spill area size is really 24 + spill_offset += 8; + + /* the rest executes twice - once to count instructions so we can + allocate memory in one block and once to fill it in... the count + should be pretty fast anyway... 
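Concretely, ADD_INST collapses to a bare (pc)++ while code is still NULL, so both passes walk the identical emission path:

        pc = 0; ... emit everything ...     first pass: count only
        descriptor = g_malloc (...);        size the block from pc
        code = ...; save_pc = pc;
        goto generate;                      second pass: code[pc++] = word
        g_assert (pc == save_pc);           the two passes must agree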
+ */ +generate: + pc = 0; + arg_reg = 26; + arg_offset = 0; + args_on_stack = 0; + parameter_slot = parameter_offset; + + ADD_INST(code, pc, gen_std(2, -16, 30)); // STD %r2,-16(%r30) + ADD_INST(code, pc, gen_stdma(3, frame_size, 30)); + ADD_INST(code, pc, gen_std(4, spill_offset, 30)); + ADD_INST(code, pc, gen_std(5, spill_offset + 8, 30)); + ADD_INST(code, pc, gen_copy(29, 3)); // COPY %r29,%r3 + ADD_INST(code, pc, gen_std(27, spill_offset + 16, 30)); + ADD_INST(code, pc, gen_nop()); // NOP + + ADD_INST(code, pc, gen_std(26, -64, 29)); // STD %r26,-64(%r29) callme + ADD_INST(code, pc, gen_std(25, -56, 29)); // STD %r25,-56(%r29) retval + ADD_INST(code, pc, gen_std(24, -48, 29)); // STD %r24,-48(%r29) this_obj + ADD_INST(code, pc, gen_std(23, -40, 29)); // STD %r23,-40(%r29) arguments + + if (sig->param_count > 0) + ADD_INST(code, pc, gen_copy(23, 4)); // r4 is the current pointer to the stackval array of args + + if (sig->hasthis) { + if (sig->call_convention != MONO_CALL_THISCALL) { + ADD_INST(code, pc, gen_copy(24, arg_reg)); + --arg_reg; + parameter_slot += 8; + } else { + fprintf(stderr, "case I didn't handle\n"); + } + } + + for (param = 0; param < sig->param_count; param++) { + int type = sig->params[param]->type; + if (sig->params[param]->byref) { + if (args_on_stack) { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + } else { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); + --arg_reg; + } + arg_offset += sizeof(stackval); + parameter_slot += 8; + continue; + } + typeswitch: + switch (type) { + case MONO_TYPE_CHAR: + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + if (args_on_stack) { + ADD_INST(code, pc, gen_ldw(arg_offset, 4, 5)); + switch (type) { + case MONO_TYPE_I4: + case MONO_TYPE_U4: + ADD_INST(code, pc, gen_stw(5, parameter_slot + 4, 30)); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + ADD_INST(code, pc, gen_sth(5, parameter_slot + 6, 30)); + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + ADD_INST(code, pc, gen_stb(5, parameter_slot + 7, 30)); + break; + } + } else { + ADD_INST(code, pc, gen_ldw(arg_offset, 4, arg_reg)); + --arg_reg; + } + arg_offset += sizeof(stackval); + parameter_slot += 8; + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_PTR: + if (args_on_stack) { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + } else { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); + --arg_reg; + } + arg_offset += sizeof(stackval); + parameter_slot += 8; + break; + case MONO_TYPE_R8: + if (args_on_stack) { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + } else { + ADD_INST(code, pc, gen_fldd(arg_offset, 4, FP_ARG_REG(arg_reg))); + --arg_reg; + } + arg_offset += sizeof(stackval); + parameter_slot += 8; + break; + case MONO_TYPE_R4: + if (args_on_stack) { + ADD_INST(code, pc, gen_fldd(arg_offset, 4, 22)); + ADD_INST(code, pc, gen_fcnv_dbl_sng(22, 22)); + ADD_INST(code, pc, gen_fstw(22, parameter_slot + 4, 30)); + } else { + ADD_INST(code, pc, gen_fldd(arg_offset, 4, FP_ARG_REG(arg_reg))); + ADD_INST(code, pc, gen_fcnv_dbl_sng(FP_ARG_REG(arg_reg), FP_ARG_REG(arg_reg))); + --arg_reg; + } 
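/* (R4 arrives as a double - the interpreter's stackval keeps floats as doubles - and the fcnv narrows it in place, leaving the single in the right half of the FP register, which seems to be where this ABI wants it) */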
+ arg_offset += sizeof(stackval); + parameter_slot += 8; + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [param]->data.klass->enumtype) { + type = sig->params [param]->data.klass->enum_basetype->type; + goto typeswitch; + } else { + int size = mono_class_native_size (sig->params [param]->data.klass, NULL); + // assumes struct is 8 byte aligned whatever its size... (as interp.c guarantees at present) + // copies multiple of 8 bytes which may include some trailing garbage but should be safe + if (size <= 8) { + if (args_on_stack) { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + ADD_INST(code, pc, gen_ldd(0, 5, 5)); + ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + } else { + ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); + ADD_INST(code, pc, gen_ldd(0, arg_reg, arg_reg)); + --arg_reg; + } + parameter_slot += 8; + } else { + int soffset = 0; + if ((parameter_slot & 15) != 0) { + --arg_reg; + if (arg_reg < 19) { + args_on_stack = 1; + } + parameter_slot += 8; + } + ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + // might generate a lot of code for very large structs... should + // use a loop or routine call them + while (size > 0) { + if (args_on_stack) { + ADD_INST(code, pc, gen_ldd(soffset, 5, 31)); + ADD_INST(code, pc, gen_std(31, parameter_slot, 30)); + } else { + ADD_INST(code, pc, gen_ldd(soffset, 5, arg_reg)); + --arg_reg; + if (arg_reg < 19) + args_on_stack = 1; + } + parameter_slot += 8; + soffset += 8; + size -= 8; + } + } + arg_offset += sizeof(stackval); + break; + } + break; + default: + g_error ("mono_create_trampoline: unhandled arg type %d", type); + return NULL; + } + + if (arg_reg < 19) { + args_on_stack = 1; + } + } + + // for large return structs just pass on the buffer given to us. + if (sig->ret->type == MONO_TYPE_VALUETYPE && sig->ret->data.klass->enumtype == 0) { + int size = mono_class_native_size (sig->ret->data.klass, NULL); + if (size > 16) { + ADD_INST(code, pc, gen_ldd(-56, 3, 28)); + ADD_INST(code, pc, gen_ldd(0, 28, 28)); + } + } + + ADD_INST(code, pc, gen_nop()); // NOP + ADD_INST(code, pc, gen_ldd(-64, 29, 5)); + ADD_INST(code, pc, gen_ldd(24, 5, 27)); + ADD_INST(code, pc, gen_ldd(16, 5, 5)); + ADD_INST(code, pc, gen_bve(5, 1)); + ADD_INST(code, pc, gen_ldo(parameter_offset + 64, 30, 29)); + ADD_INST(code, pc, gen_ldd(spill_offset + 16, 30, 27)); + ADD_INST(code, pc, gen_nop()); // NOP + + if (string_ctor) { + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_std(28, 0, 19)); // STD %r28,0(%r19) + } + else if (sig->ret->type != MONO_TYPE_VOID) { + int type = sig->ret->type; + + rettypeswitch: + switch (type) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_stb(28, 0, 19)); // STB %r28,0(%r19) + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_stw(28, 0, 19)); // STW %r28,0(%r19) + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_sth(28, 0, 19)); // STH %r28,0(%r19) + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_PTR: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_std(28, 0, 19)); // STD 
%r28,0(%r19) + break; + case MONO_TYPE_R8: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_fstd(4, 0, 19)); // FSTD %fr4,0(%r19) + break; + case MONO_TYPE_R4: + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, gen_fstw(4, 0, 19)); // FSTW %fr4r,0(%r19) + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + type = sig->ret->data.klass->enum_basetype->type; + goto rettypeswitch; + } else { + int size = mono_class_native_size (sig->ret->data.klass, NULL); + if (size <= 16) { + int reg = 28; + int off = 0; + ADD_INST(code, pc, gen_ldd(-56, 3, 19)); + ADD_INST(code, pc, gen_ldd(0, 19, 19)); + if (size > 8) { + ADD_INST(code, pc, gen_std(28, 0, 19)); + size -= 8; + reg = 29; + off += 8; + } + // get rest of value right aligned in the register + ADD_INST(code, pc, gen_extrdu(reg, 8 * size - 1, 8 * size, reg)); + if ((size & 1) != 0) { + ADD_INST(code, pc, gen_stb(reg, off + size - 1, 19)); + ADD_INST(code, pc, gen_extrdu(reg, 55, 56, reg)); + size -= 1; + } + if ((size & 2) != 0) { + ADD_INST(code, pc, gen_sth(reg, off + size - 2, 19)); + ADD_INST(code, pc, gen_extrdu(reg, 47, 48, reg)); + size -= 2; + } + if ((size & 4) != 0) + ADD_INST(code, pc, gen_stw(reg, off + size - 4, 19)); + } + break; + } + default: + g_error ("mono_create_trampoline: unhandled ret type %d", type); + return NULL; + } + } + + ADD_INST(code, pc, gen_ldd(-frame_size-16, 30, 2)); + ADD_INST(code, pc, gen_ldd(spill_offset, 30, 4)); + ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 5)); + ADD_INST(code, pc, gen_bve(2, 0)); + ADD_INST(code, pc, gen_lddmb(-frame_size, 30, 3)); + + if (code == NULL) { + descriptor = (void **)g_malloc(4 * sizeof(void *) + pc * sizeof(unsigned int)); + code = (unsigned int *)((char *)descriptor + 4 * sizeof(void *)); + save_pc = pc; + goto generate; + } else + g_assert(pc == save_pc); + + if (debug_asm) + fprintf(stderr, "generated: %d bytes\n", pc * 4); + + // must do this so we can actually execute the code we just put in memory + flush_cache(code, 4 * pc); + + descriptor[0] = 0; + descriptor[1] = 0; + descriptor[2] = code; + descriptor[3] = 0; + + return (MonoPIFunc)descriptor; +} + +void * +mono_create_method_pointer (MonoMethod *method) +{ + MonoMethodSignature *sig = method->signature; + MonoJitInfo *ji; + int i; + int pc; + int param; + void **descriptor = NULL; + void **data = NULL; + unsigned int *code = NULL; + int arg_reg = 26; + int arg_offset = 0; + int frame_size; + int invoke_rec_offset; + int stack_vals_offset; + int stack_val_pos; + int arg_val_pos; + int spill_offset; + int *vtoffsets; + int t; + + if (debug_asm) { + fprintf(stderr, "mono_create_method_pointer %s: flags %d\n", method->name, method->flags); + fprintf(stderr, "method: # params %d has this %d exp this %d\n", sig->param_count, sig->hasthis, sig->explicit_this); + fprintf(stderr, "ret %d\n", sig->ret->type); + for (i = 0; i < sig->param_count; i++) + fprintf(stderr, "%d: %d\n", i, sig->params[i]->type); + } + + /* + * If it is a static P/Invoke method, we can just return the pointer + * to the method implementation. + */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + + // the extra stackval is for the return val if necessary + // the 64 is for outgoing parameters and the 16 is the frame marker. 
+ // the other 16 is space for struct return vals < 16 bytes + frame_size = sizeof(MonoInvocation) + (sig->param_count + 1) * sizeof(stackval) + 16 + 64 + 16; + frame_size += 15; + frame_size &= ~15; + invoke_rec_offset = -frame_size; + vtoffsets = (int *)alloca(sig->param_count * sizeof(int)); + + t = invoke_rec_offset; + + for (i = 0; i < sig->param_count; ++i) + if (sig->params[i]->type == MONO_TYPE_VALUETYPE && + !sig->params[i]->data.klass->enumtype && !sig->params[i]->byref) { + int size = mono_class_native_size (sig->params[i]->data.klass, NULL); + size += 7; + size &= ~7; + t -= size; + frame_size += size; + vtoffsets[i] = t; + } + + stack_vals_offset = invoke_rec_offset + sizeof(MonoInvocation); + stack_vals_offset += 7; + stack_vals_offset &= ~7; + frame_size += 32; + frame_size += 15; + frame_size &= ~15; + spill_offset = -frame_size + 8; + +generate: + stack_val_pos = stack_vals_offset; + arg_val_pos = -64; + pc = 0; + + ADD_INST(code, pc, gen_std(2, -16, 30)); + ADD_INST(code, pc, gen_stdma(3, frame_size, 30)); + ADD_INST(code, pc, gen_std(4, spill_offset, 30)); + ADD_INST(code, pc, gen_copy(29, 3)); + ADD_INST(code, pc, gen_std(27, spill_offset + 8, 30)); + ADD_INST(code, pc, gen_std(28, spill_offset + 16, 30)); + ADD_INST(code, pc, gen_nop()); + + ADD_INST(code, pc, gen_std(26, -64, 29)); // STD %r26,-64(%r29) + ADD_INST(code, pc, gen_std(25, -56, 29)); // STD %r25,-56(%r29) + ADD_INST(code, pc, gen_std(24, -48, 29)); // STD %r24,-48(%r29) + ADD_INST(code, pc, gen_std(23, -40, 29)); // STD %r23,-40(%r29) + ADD_INST(code, pc, gen_std(22, -32, 29)); // STD %r22,-32(%r29) + ADD_INST(code, pc, gen_std(21, -24, 29)); // STD %r21,-24(%r29) + ADD_INST(code, pc, gen_std(20, -16, 29)); // STD %r20,-16(%r29) + ADD_INST(code, pc, gen_std(19, -8, 29)); // STD %r19,-8(%r29) + + ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, parent), 30)); + ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, child), 30)); + ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex), 30)); + ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex_handler), 30)); + ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ip), 30)); + + if (data != NULL) + data[0] = method; + ADD_INST(code, pc, gen_ldd(0, 27, 19)); + ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, method), 30)); + + if (sig->hasthis) { + if (sig->call_convention != MONO_CALL_THISCALL) { + ADD_INST(code, pc, gen_std(arg_reg, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, obj), 30)); + arg_val_pos += 8; + } else { + fprintf(stderr, "case I didn't handle 2\n"); + } + } + + if (data != NULL) + data[2] = (void *)stackval_from_data; + + for (i = 0; i < sig->param_count; ++i) { + if (data != NULL) + data[4 + i] = sig->params[i]; + ADD_INST(code, pc, gen_ldd((4 + i) * 8, 27, 26)); // LDD x(%r27),%r26 == type + ADD_INST(code, pc, gen_ldo(stack_val_pos, 30, 25)); // LDD x(%r30),%r25 == &stackval + if (sig->params[i]->byref) { + ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + } else { + int type = sig->params[i]->type; + typeswitch: + switch (type) { + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_PTR: + case MONO_TYPE_R8: + ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + break; + case MONO_TYPE_I4: + case 
MONO_TYPE_U4: + ADD_INST(code, pc, gen_ldo(arg_val_pos + 4, 3, 24)); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + ADD_INST(code, pc, gen_ldo(arg_val_pos + 6, 3, 24)); + break; + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_BOOLEAN: + ADD_INST(code, pc, gen_ldo(arg_val_pos + 7, 3, 24)); + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + type = sig->params [i]->data.klass->enum_basetype->type; + goto typeswitch; + } else { + int size = mono_class_native_size (sig->params[i]->data.klass, NULL); + if (size <= 8) + ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + else { + arg_val_pos += 15; + arg_val_pos &= ~15; + ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + } + + arg_val_pos += size; + arg_val_pos += 7; + arg_val_pos &= ~7; + arg_val_pos -=8 ; // as it is incremented later + + ADD_INST(code, pc, gen_ldo(vtoffsets[i], 30, 19)); + ADD_INST(code, pc, gen_std(19, 0, 25)); + } + break; + default: + fprintf(stderr, "can not cope in create method pointer %d\n", sig->params[i]->type); + break; + } + } + + ADD_INST(code, pc, gen_ldo(sig->pinvoke, 0, 23)); // LDI sig->pinvoke,%r23 + ADD_INST(code, pc, gen_ldd(16, 27, 19)); // LDD x(%r27),%r19 == stackval_from_data + ADD_INST(code, pc, gen_ldd(16, 19, 20)); // LDD 16(%r19),%r20 + ADD_INST(code, pc, gen_ldd(24, 19, 27)); // LDD 24(%r19),%r27 + ADD_INST(code, pc, gen_bve(20, 1)); // BVE,L (%r20),%r2 + ADD_INST(code, pc, gen_ldo(-16, 30, 29)); // LDO -16(%r30),%r29 + ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 27)); + + stack_val_pos += sizeof (stackval); + arg_val_pos += 8; + g_assert(stack_val_pos < -96); + } + + ADD_INST(code, pc, gen_ldo(stack_vals_offset, 30, 19)); + ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, stack_args), 30)); + ADD_INST(code, pc, gen_ldo(stack_val_pos, 30, 19)); + ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, retval), 30)); + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) { + int size = mono_class_native_size (sig->ret->data.klass, NULL); + // for large return structs pass on the pointer given us by our caller. 
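// (structs of 16 bytes or less come back in %r28/%r29 instead, so the else branch below parks the return slot in the 16 bytes reserved past the retval stackval and the value is pulled back into %r28/%r29 once ves_exec_method returns)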
+ if (size > 16) + ADD_INST(code, pc, gen_ldd(spill_offset + 16, 30, 28)); + else // use space left on stack for the return value + ADD_INST(code, pc, gen_ldo(stack_val_pos + sizeof(stackval), 30, 28)); + ADD_INST(code, pc, gen_std(28, stack_val_pos, 30)); + } + + ADD_INST(code, pc, gen_ldo(invoke_rec_offset, 30, 26)); // address of invocation + + if (data != NULL) + data[1] = (void *)ves_exec_method; + ADD_INST(code, pc, gen_ldd(8, 27, 19)); // LDD 8(%r27),%r19 + ADD_INST(code, pc, gen_ldd(16, 19, 20)); // LDD 16(%r19),%r20 + ADD_INST(code, pc, gen_ldd(24, 19, 27)); // LDD 24(%r19),%r27 + ADD_INST(code, pc, gen_bve(20, 1)); // BVE,L (%r20),%r2 + ADD_INST(code, pc, gen_ldo(-16, 30, 29)); // LDO -16(%r30),%r29 + ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 27)); + if (sig->ret->byref) { + fprintf(stderr, "can'ty cope with ret byref\n"); + } else { + int simpletype = sig->ret->type; + enum_retvalue: + switch (simpletype) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + ADD_INST(code, pc, gen_ldw(stack_val_pos, 30, 28)); // LDW x(%r30),%r28 + break; + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_STRING: + case MONO_TYPE_OBJECT: + case MONO_TYPE_CLASS: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_PTR: + ADD_INST(code, pc, gen_ldd(stack_val_pos, 30, 28)); // LDD x(%r30),%r28 + break; + case MONO_TYPE_R8: + ADD_INST(code, pc, gen_fldd(stack_val_pos, 30, 4)); // FLDD x(%r30),%fr4 + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } else { + int size = mono_class_native_size (sig->ret->data.klass, NULL); + if (size <= 16) { + ADD_INST(code, pc, gen_ldd(stack_val_pos, 30, 28)); + if (size > 8) + ADD_INST(code, pc, gen_ldd(8, 28, 29)); + ADD_INST(code, pc, gen_ldd(0, 28, 28)); + } + } + break; + default: + fprintf(stderr, "can't cope with ret type %d\n", simpletype); + return NULL; + } + } + + ADD_INST(code, pc, gen_ldd(-frame_size-16, 30, 2)); + ADD_INST(code, pc, gen_ldd(spill_offset, 30, 4)); + ADD_INST(code, pc, gen_bve(2, 0)); + ADD_INST(code, pc, gen_lddmb(-frame_size, 30, 3)); + if (code == NULL) { + descriptor = (void **)malloc((8 + sig->param_count) * sizeof(void *) + sizeof(unsigned int) * pc); + data = descriptor + 4; + code = (unsigned int *)(data + 4 + sig->param_count); + goto generate; + } + + flush_cache(code, 4 * pc); + + descriptor[0] = 0; + descriptor[1] = 0; + descriptor[2] = code; + descriptor[3] = data; + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 4; // does this matter? 
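/* (note that ji->code_start below is the descriptor, not the raw code: on this 64-bit HP runtime, as far as I can tell, a function pointer addresses a descriptor whose slots at offsets 16 and 24 hold the entry point and the data/global pointer - exactly what the generated calls fetch with gen_ldd(16, 19, 20) / gen_ldd(24, 19, 27) before each BVE,L) */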
+ ji->code_start = descriptor; + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; +} -- cgit v1.1 From e4f9a75ed58f5ca214a685041f2a538e2f40fe1f Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Mon, 13 Oct 2003 22:56:37 +0000 Subject: 2003-10-13 Bernie Solomon * Makefile.am: add hppa subdir svn path=/trunk/mono/; revision=18999 --- ChangeLog | 2 ++ Makefile.am | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 892b9b3..1df6a2f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2003-10-13 Bernie Solomon + * Makefile.am: add hppa subdir + * hppa/tramp.c: add initial implementation - this is 64 bit only hppa/Makefile.am hppa/.cvsignore: added diff --git a/Makefile.am b/Makefile.am index 4f68e74..acf603d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 alpha +DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From c41c989929efaf77826634392c8ce9c54525809d Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Tue, 14 Oct 2003 05:17:17 +0000 Subject: 2003-10-13 Bernie Solomon * x86/tramp.c: restore EDX after memcpy call svn path=/trunk/mono/; revision=19024 --- ChangeLog | 4 ++++ x86/tramp.c | 2 ++ 2 files changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 1df6a2f..b5d71cf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,9 @@ 2003-10-13 Bernie Solomon + * x86/tramp.c: restore EDX after memcpy call + +2003-10-13 Bernie Solomon + * Makefile.am: add hppa subdir * hppa/tramp.c: add initial implementation - this is 64 bit only diff --git a/x86/tramp.c b/x86/tramp.c index 3a0d50c..4c71bc6 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -201,6 +201,8 @@ enum_marshal: x86_mov_reg_imm (p, X86_EAX, memcpy); x86_call_reg (p, X86_EAX); x86_alu_reg_imm (p, X86_ADD, X86_ESP, 12); + /* memcpy might clobber EDX so restore it */ + x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); } } else { /* it's an enum value */ -- cgit v1.1 From ebebe8e4565897dfaad69911c88f4dda134d4b84 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 31 Oct 2003 13:03:36 +0000 Subject: 2003-10-31 Zoltan Varga * */tramp.c (mono_create_method_pointer): Rename to mono_arch_create_method_pointer, move common code to a new function in interp.c. * */tramp.c (mono_create_trampoline): Rename to mono_arch_create_trampoline for consistency. svn path=/trunk/mono/; revision=19500 --- ChangeLog | 9 +++++++++ alpha/tramp.c | 4 ++-- arm/tramp.c | 18 ++---------------- hppa/tramp.c | 18 ++---------------- ppc/tramp.c | 18 ++---------------- s390/tramp.c | 6 +++--- sparc/tramp.c | 14 ++------------ x86/tramp.c | 18 ++---------------- 8 files changed, 24 insertions(+), 81 deletions(-) diff --git a/ChangeLog b/ChangeLog index b5d71cf..655dcba 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,12 @@ +2003-10-31 Zoltan Varga + + * */tramp.c (mono_create_method_pointer): Rename to + mono_arch_create_method_pointer, move common code to a new function in + interp.c. + + * */tramp.c (mono_create_trampoline): Rename to + mono_arch_create_trampoline for consistency. 
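The common code referred to above is the static P/Invoke fast path that every port used to duplicate; judging from the identical blocks deleted below, the shared entry point in interp.c presumably keeps the old name and looks roughly like this sketch (assumed shape, not the actual interp.c source):

        void *
        mono_create_method_pointer (MonoMethod *method)
        {
                MonoJitInfo *ji;
                /* a static P/Invoke needs no stub: the native address is the entry point */
                if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) {
                        ji = g_new0 (MonoJitInfo, 1);
                        ji->method = method;
                        ji->code_size = 1;
                        ji->code_start = method->addr;
                        mono_jit_info_table_add (mono_root_domain, ji);
                        return method->addr;
                }
                /* otherwise defer to the per-arch stub generator */
                return mono_arch_create_method_pointer (method);
        }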
+ 2003-10-13 Bernie Solomon * x86/tramp.c: restore EDX after memcpy call diff --git a/alpha/tramp.c b/alpha/tramp.c index c02a83a..ee5e94c 100644 --- a/alpha/tramp.c +++ b/alpha/tramp.c @@ -213,7 +213,7 @@ static void calculate_size(MonoMethodSignature *sig, int * INSTRUCTIONS, int * S } MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { unsigned char *p; unsigned char *buffer; @@ -352,7 +352,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) } void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { g_error ("Unsupported arch"); return NULL; diff --git a/arm/tramp.c b/arm/tramp.c index 12793c8..8bebf1b 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -97,7 +97,7 @@ void* alloc_code_buff (int num_instr) /* * Refer to ARM Procedure Call Standard (APCS) for more info. */ -MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { MonoType* param; MonoPIFunc code_buff; @@ -435,7 +435,7 @@ enum_retvalue: * Still need to figure out how to handle the exception stuff * across the managed/unmanaged boundary. */ -void* mono_create_method_pointer (MonoMethod* method) +void* mono_arch_create_method_pointer (MonoMethod* method) { MonoMethodSignature* sig; guchar* p, * p_method, * p_stackval_from_data, * p_exec; @@ -444,20 +444,6 @@ void* mono_create_method_pointer (MonoMethod* method) int areg, reg_args, shift, pos; MonoJitInfo *ji; - /* - * If it is a static P/Invoke method just - * just return the pointer to the implementation - */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0(MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add(mono_root_domain, ji); - return method->addr; - } - code_buff = alloc_code_buff(128); p = (guchar*)code_buff; diff --git a/hppa/tramp.c b/hppa/tramp.c index 8302a14..0604eb6 100644 --- a/hppa/tramp.c +++ b/hppa/tramp.c @@ -295,7 +295,7 @@ static void flush_cache(void *address, int length) */ MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { int pc, save_pc; int param; @@ -677,7 +677,7 @@ generate: } void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig = method->signature; MonoJitInfo *ji; @@ -706,20 +706,6 @@ mono_create_method_pointer (MonoMethod *method) fprintf(stderr, "%d: %d\n", i, sig->params[i]->type); } - /* - * If it is a static P/Invoke method, we can just return the pointer - * to the method implementation. - */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add (mono_root_domain, ji); - return method->addr; - } - // the extra stackval is for the return val if necessary // the 64 is for outgoing parameters and the 16 is the frame marker. 
// the other 16 is space for struct return vals < 16 bytes diff --git a/ppc/tramp.c b/ppc/tramp.c index 529d1b4..124eabf 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -591,7 +591,7 @@ emit_epilog (guint8 *p, MonoMethodSignature *sig, guint stack_size) } MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { guint8 *p, *code_buffer; guint stack_size, code_size; @@ -650,7 +650,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) * across the managed/unmanaged boundary. */ void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; @@ -660,20 +660,6 @@ mono_create_method_pointer (MonoMethod *method) gint *vtbuf; guint32 simpletype; - /* - * If it is a static P/Invoke method, we can just return the pointer - * to the method implementation. - */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add (mono_root_domain, ji); - return method->addr; - } - code_size = 1024; stack_size = 1024; stack_param = 0; diff --git a/s390/tramp.c b/s390/tramp.c index d8ecbb5..c5ebbf9 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -796,7 +796,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /*------------------------------------------------------------------*/ /* */ -/* Name - mono_create_method_pointer */ +/* Name - mono_arch_create_method_pointer */ /* */ /* Function - Returns a pointer to a native function that can */ /* be used to call the specified method. */ @@ -811,7 +811,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /* */ /* Logic: */ /* ------ */ -/* mono_create_method_pointer (MonoMethod *method) */ +/* mono_arch_create_method_pointer (MonoMethod *method) */ /* create the unmanaged->managed wrapper */ /* register it with mono_jit_info_table_add() */ /* */ @@ -848,7 +848,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /*------------------------------------------------------------------*/ void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; diff --git a/sparc/tramp.c b/sparc/tramp.c index 7dae8e9..0bf55c1 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -698,7 +698,7 @@ emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig, } MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { guint32 *p, *code_buffer; guint stack_size, code_size, i; @@ -744,7 +744,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) #define MINV_POS (MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS) void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; @@ -755,16 +755,6 @@ mono_create_method_pointer (MonoMethod *method) gint *vtbuf; gint32 simpletype; - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add (mono_root_domain, ji); - return method->addr; - } - code_size = 1024; /* these should 
be calculated... */ stack_size = 1024; stack_param = 0; diff --git a/x86/tramp.c b/x86/tramp.c index 4c71bc6..3b2e7c9 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -32,7 +32,7 @@ #define ARG_SIZE sizeof (stackval) MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { unsigned char *p, *code_buffer; guint32 stack_size = 0, code_size = 50; @@ -339,7 +339,7 @@ enum_marshal: * across the managed/unmanaged boundary. */ void * -mono_create_method_pointer (MonoMethod *method) +mono_arch_create_method_pointer (MonoMethod *method) { MonoMethodSignature *sig; MonoJitInfo *ji; @@ -349,20 +349,6 @@ mono_create_method_pointer (MonoMethod *method) int i, size, align, cpos; int *vtbuf; - /* - * If it is a static P/Invoke method, we can just return the pointer - * to the method implementation. - */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add (mono_root_domain, ji); - return method->addr; - } - sig = method->signature; code_buffer = p = alloca (512); /* FIXME: check for overflows... */ -- cgit v1.1 From 96651158bf48aa1c31b5f2e3ca4cbf904211b1dc Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 13 Nov 2003 15:23:48 +0000 Subject: Thu Nov 13 16:24:29 CET 2003 Paolo Molaro * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct. svn path=/trunk/mono/; revision=19938 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 59 ++++++++++++++++++++++++++++--------------------------- 2 files changed, 35 insertions(+), 29 deletions(-) diff --git a/ChangeLog b/ChangeLog index 655dcba..1dadd04 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Thu Nov 13 16:24:29 CET 2003 Paolo Molaro + + * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct. + 2003-10-31 Zoltan Varga * */tramp.c (mono_create_method_pointer): Rename to diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 2f62aa4..dd7b4b7 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -114,6 +114,7 @@ enum { #define ppc_emit32(c,x) do { *((guint32 *) c) = x; ((guint32 *)c)++;} while (0) #define ppc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) +#define ppc_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) #define ppc_load(c,D,v) do { \ if (ppc_is_imm16 ((v))) { \ @@ -290,70 +291,70 @@ my and Ximian's copyright to this code. 
;) #define ppc_eqv(c,A,S,B) ppc_eqvx(c,A,S,B,0) #define ppc_eqvd(c,A,S,B) ppc_eqvx(c,A,S,B,1) -#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 12) | (954 << 1) | Rc) +#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (954 << 1) | Rc) #define ppc_extsb(c,A,S) ppc_extsbx(c,A,S,0) #define ppc_extsbd(c,A,S) ppc_extsbx(c,A,S,1) -#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 12) | (922 << 1) | Rc) +#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (922 << 1) | Rc) #define ppc_extsh(c,A,S) ppc_extshx(c,A,S,0) #define ppc_extshd(c,A,S) ppc_extshx(c,A,S,1) -#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (264 << 1) | Rc) +#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (264 << 1) | Rc) #define ppc_fabs(c,D,B) ppc_fabsx(c,D,B,0) #define ppc_fabsd(c,D,B) ppc_fabsx(c,D,B,1) -#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (21 << 1) | Rc) +#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc) #define ppc_fadd(c,D,A,B) ppc_faddx(c,D,A,B,0) #define ppc_faddd(c,D,A,B) ppc_faddx(c,D,A,B,1) -#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (21 << 1) | Rc) +#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc) #define ppc_fadds(c,D,A,B) ppc_faddsx(c,D,A,B,0) #define ppc_faddsd(c,D,A,B) ppc_faddsx(c,D,A,B,1) -#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 12) | (32 << 1) | 0) -#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 12) | (0 << 1) | 0) +#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (32 << 1) | 0) +#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (0 << 1) | 0) -#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (14 << 1) | Rc) +#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (14 << 1) | Rc) #define ppc_fctiw(c,D,B) ppc_fctiwx(c,D,B,0) #define ppc_fctiwd(c,D,B) ppc_fctiwx(c,D,B,1) -#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (15 << 1) | Rc) +#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (15 << 1) | Rc) #define ppc_fctiwz(c,D,B) ppc_fctiwzx(c,D,B,0) #define ppc_fctiwzd(c,D,B) ppc_fctiwzx(c,D,B,1) -#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (18 << 1) | Rc) +#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc) #define ppc_fdiv(c,D,A,B) ppc_fdivx(c,D,A,B,0) #define ppc_fdivd(c,D,A,B) ppc_fdivx(c,D,A,B,1) -#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (0 << 7) | (18 << 1) | Rc) +#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc) #define ppc_fdivs(c,D,A,B) ppc_fdivsx(c,D,A,B,0) #define ppc_fdivsd(c,D,A,B) ppc_fdivsx(c,D,A,B,1) -#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 
26) | (D << 21) | (A << 16) | (B << 12) | (C << 7) | (29 << 1) | Rc) +#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc) #define ppc_fmadd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,0) #define ppc_fmaddd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,1) -#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 12) | (C << 7) | (29 << 1) | Rc) +#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc) #define ppc_fmadds(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,0) #define ppc_fmaddsd(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,1) -#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 12) | (72 << 1) | Rc) +#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (72 << 1) | Rc) #define ppc_fmr(c,D,B) ppc_fmrx(c,D,B,0) #define ppc_fmrd(c,D,B) ppc_fmrx(c,D,B,1) -#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (C << 7) | (28 << 1) | Rc) +#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc) #define ppc_fmsub(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,0) #define ppc_fmsubd(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,1) -#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (28 << 1) | Rc) +#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc) #define ppc_fmsubs(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,0) #define ppc_fmsubsd(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,1) -#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 7) | (25 << 1) | Rc) +#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc) #define ppc_fmul(c,D,A,C) ppc_fmulx(c,D,A,C,0) #define ppc_fmuld(c,D,A,C) ppc_fmulx(c,D,A,C,1) -#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 7) | (25 << 1) | Rc) +#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc) #define ppc_fmuls(c,D,A,C) ppc_fmulsx(c,D,A,C,0) #define ppc_fmulsd(c,D,A,C) ppc_fmulsx(c,D,A,C,1) @@ -365,23 +366,23 @@ my and Ximian's copyright to this code. 
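Every corrected macro in this patch moves the B register field to shift 11 and the C register field to shift 6, which matches the PowerPC floating-point A-form layout in the macros' shift terms: opcode at 26, D at 21, A at 16, B at 11, C at 6, XO at 1, Rc in bit 0. A minimal sanity check of the corrected encoding (illustrative, assuming ppc_emit32 stores one 32-bit word and advances the cursor):

    unsigned int buf [1], *p = buf;
    ppc_fadd (p, 1, 2, 3); /* fadd f1,f2,f3 */
    /* expected: buf [0] == (63 << 26) | (1 << 21) | (2 << 16) | (3 << 11)
     *                      | (21 << 1) == 0xfc22182a */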
;) #define ppc_fneg(c,D,B) ppc_fnegx(c,D,B,0) #define ppc_fnegd(c,D,B) ppc_fnegx(c,D,B,1) -#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (31 << 1) | Rc) +#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc) #define ppc_fnmadd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,0) #define ppc_fnmaddd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,1) -#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (31 << 1) | Rc) +#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc) #define ppc_fnmadds(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,0) #define ppc_fnmaddsd(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,1) -#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (30 << 1) | Rc) +#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc) #define ppc_fnmsub(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,0) #define ppc_fnmsubd(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,1) -#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (30 << 1) | Rc) +#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc) #define ppc_fnmsubs(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,0) #define ppc_fnmsubsd(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,1) -#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (24 << 1) | Rc) +#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (24 << 1) | Rc) #define ppc_fres(c,D,B) ppc_fresx(c,D,B,0) #define ppc_fresd(c,D,B) ppc_fresx(c,D,B,1) @@ -389,27 +390,27 @@ my and Ximian's copyright to this code. 
;) #define ppc_frsp(c,D,B) ppc_frspx(c,D,B,0) #define ppc_frspd(c,D,B) ppc_frspx(c,D,B,1) -#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (26 << 1) | Rc) +#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (26 << 1) | Rc) #define ppc_frsqrte(c,D,B) ppc_frsqrtex(c,D,B,0) #define ppc_frsqrted(c,D,B) ppc_frsqrtex(c,D,B,1) -#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 7) | (23 << 1) | Rc) +#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (23 << 1) | Rc) #define ppc_fsel(c,D,A,C,B) ppc_fselx(c,D,A,C,B,0) #define ppc_fseld(c,D,A,C,B) ppc_fselx(c,D,A,C,B,1) -#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (22 << 1) | Rc) +#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc) #define ppc_fsqrt(c,D,B) ppc_fsqrtx(c,D,B,0) #define ppc_fsqrtd(c,D,B) ppc_fsqrtx(c,D,B,1) -#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 7) | (22 << 1) | Rc) +#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc) #define ppc_fsqrts(c,D,B) ppc_fsqrtsx(c,D,B,0) #define ppc_fsqrtsd(c,D,B) ppc_fsqrtsx(c,D,B,1) -#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 7) | (20 << 1) | Rc) +#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc) #define ppc_fsub(c,D,A,B) ppc_fsubx(c,D,A,B,0) #define ppc_fsubd(c,D,A,B) ppc_fsubx(c,D,A,B,1) -#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 7) | (20 << 1) | Rc) +#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc) #define ppc_fsubs(c,D,A,B) ppc_fsubsx(c,D,A,B,0) #define ppc_fsubsd(c,D,A,B) ppc_fsubsx(c,D,A,B,1) -- cgit v1.1 From 7e4789fdfc87f75e63612fe0aca1f66d76134ba9 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 3 Dec 2003 16:48:07 +0000 Subject: Typo fix. svn path=/trunk/mono/; revision=20745 --- ppc/ppc-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index dd7b4b7..d09afd5 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -591,7 +591,7 @@ my and Ximian's copyright to this code. ;) #define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0) #define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1) -#define ppc_sub(c,D,A,B) ppc_subf(c,D,B.A) +#define ppc_sub(c,D,A,B) ppc_subf(c,D,B,A) #define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc) #define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0) -- cgit v1.1 From 963e1b962894e9b434a2e80e63394bd0d34e68b8 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Sat, 3 Jan 2004 21:42:37 +0000 Subject: Codegen macros for mips. 
svn path=/trunk/mono/; revision=21658
---
 mips/mips-codegen.h | 340 ++++++++++++++++++++++++++++++++++++++++
 mips/test.c         | 149 +++++++++++++++++++++
 2 files changed, 489 insertions(+)
 create mode 100644 mips/mips-codegen.h
 create mode 100644 mips/test.c

diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h
new file mode 100644
index 0000000..8cc0cd0
--- /dev/null
+++ b/mips/mips-codegen.h
@@ -0,0 +1,340 @@
+#ifndef __MIPS_CODEGEN_H__
+#define __MIPS_CODEGEN_H__
+/*
+ * Copyright (c) 2004 Novell, Inc
+ * Author: Paolo Molaro (lupus@ximian.com)
+ *
+ */
+
+/* registers */
+enum {
+	mips_zero,
+	mips_at, /* assembler temp */
+	mips_v0, /* return values */
+	mips_v1,
+	mips_a0, /* 4 - func arguments */
+	mips_a1,
+	mips_a2,
+	mips_a3,
+	mips_t0, /* 8 temporaries */
+	mips_t1,
+	mips_t2,
+	mips_t3,
+	mips_t4,
+	mips_t5,
+	mips_t6,
+	mips_t7,
+	mips_s0, /* 16 callee saved */
+	mips_s1,
+	mips_s2,
+	mips_s3,
+	mips_s4,
+	mips_s5,
+	mips_s6,
+	mips_s7,
+	mips_t8, /* 24 temps */
+	mips_t9,
+	mips_k0, /* 26 kernel-reserved */
+	mips_k1,
+	mips_gp, /* 28 */
+	mips_sp, /* stack pointer */
+	mips_fp, /* frame pointer */
+	mips_ra /* return address */
+};
+
+/* we treat the register file as containing just doubles... */
+enum {
+	mips_f0, /* return regs */
+	mips_f2,
+	mips_f4, /* temps */
+	mips_f6,
+	mips_f8,
+	mips_f10,
+	mips_f12, /* first arg */
+	mips_f14, /* second arg */
+	mips_f16, /* temps */
+	mips_f18,
+	mips_f20, /* callee saved */
+	mips_f22,
+	mips_f24,
+	mips_f26,
+	mips_f28,
+	mips_f30
+};
+
+#define mips_emit32(c,x) do { *((unsigned int *) c) = x; ((unsigned int *)c)++;} while (0)
+#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|(imm)))
+#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|(imm)))
+#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func)))
+#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun)))
+
+/* prefetch hints */
+enum {
+	MIPS_FOR_LOAD,
+	MIPS_FOR_STORE,
+	MIPS_FOR_LOAD_STREAMED = 4,
+	MIPS_FOR_STORE_STREAMED,
+	MIPS_FOR_LOAD_RETAINED,
+	MIPS_FOR_STORE_RETAINED
+};
+
+/* coprocessors */
+enum {
+	MIPS_COP0,
+	MIPS_COP1,
+	MIPS_COP2,
+	MIPS_COP3
+};
+
+enum {
+	MIPS_FMT_SINGLE = 16,
+	MIPS_FMT_DOUBLE = 17,
+	MIPS_FMT_WORD = 20,
+	MIPS_FMT_LONG = 21,
+	MIPS_FMT3_SINGLE = 0,
+	MIPS_FMT3_DOUBLE = 1
+};
+
+/* fpu rounding mode */
+enum {
+	MIPS_ROUND_TO_NEAREST,
+	MIPS_ROUND_TO_ZERO,
+	MIPS_ROUND_TO_POSINF,
+	MIPS_ROUND_TO_NEGINF,
+	MIPS_ROUND_MASK = 3
+};
+
+/* fpu enable/cause flags, cc */
+enum {
+	MIPS_FPU_C_MASK = 1 << 23,
+	MIPS_INEXACT = 1,
+	MIPS_UNDERFLOW = 2,
+	MIPS_OVERFLOW = 4,
+	MIPS_DIVZERO = 8,
+	MIPS_INVALID = 16,
+	MIPS_NOTIMPL = 32,
+	MIPS_FPU_FLAGS_OFFSET = 2,
+	MIPS_FPU_ENABLES_OFFSET = 7,
+	MIPS_FPU_CAUSES_OFFSET = 12
+};
+
+/* fpu condition values: each comment gives the condition tested by the negation */
+enum {
+	MIPS_FPU_FALSE, /* TRUE */
+	MIPS_FPU_UNORDERED, /* ORDERED */
+	MIPS_FPU_EQ, /* NOT_EQUAL */
+	MIPS_FPU_UNORD_EQ, /* ORDERED or NEQ */
+	MIPS_FPU_ORD_LT, /* UNORDERED or GE */
+	MIPS_FPU_UNORD_LT, /* ORDERED or GE */
+	MIPS_FPU_ORD_LE, /* UNORDERED or GT */
+	MIPS_FPU_UNORD_LE /* ORDERED or GT */
+};
+
+/* arithmetic ops */
+#define mips_add(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,32)
+#define mips_addi(c,dest,src1,imm) mips_format_i(c,8,src1,dest,imm)
+#define mips_addu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,33)
+#define mips_addiu(c,dest,src1,imm)
mips_format_i(c,9,src1,dest,imm) +#define mips_dadd(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,44) +#define mips_daddi(c,dest,src1,imm) mips_format_i(c,24,src1,dest,imm) +#define mips_daddu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,45) +#define mips_daddiu(c,dest,src1,imm) mips_format_i(c,25,src1,dest,imm) +#define mips_dsub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,46) +#define mips_dsubu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,47) +#define mips_sub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,34) +#define mips_subu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,35) + +/* div and mul ops */ +#define mips_ddiv(c,src1,src2) mips_format_divmul(c,0,src1,src2,30) +#define mips_ddivu(c,src1,src2) mips_format_divmul(c,0,src1,src2,31) +#define mips_div(c,src1,src2) mips_format_divmul(c,0,src1,src2,26) +#define mips_divu(c,src1,src2) mips_format_divmul(c,0,src1,src2,27) +#define mips_dmult(c,src1,src2) mips_format_divmul(c,0,src1,src2,28) +#define mips_dmultu(c,src1,src2) mips_format_divmul(c,0,src1,src2,29) +#define mips_mult(c,src1,src2) mips_format_divmul(c,0,src1,src2,24) +#define mips_multu(c,src1,src2) mips_format_divmul(c,0,src1,src2,25) + +/* shift ops */ +#define mips_dsll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,56) +#define mips_dsll32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,60) +#define mips_dsllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,20) +#define mips_dsra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,59) +#define mips_dsra32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,63) +#define mips_dsrav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,23) +#define mips_dsrl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,58) +#define mips_dsrl32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,62) +#define mips_dsrlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,22) +#define mips_sll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,0) +#define mips_sllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,4) +#define mips_sra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,3) +#define mips_srav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,7) +#define mips_srl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,2) +#define mips_srlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,6) + +/* logical ops */ +#define mips_and(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,36) +#define mips_andi(c,dest,src1,imm) mips_format_i(c,12,src1,dest,imm) +#define mips_nor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,39) +#define mips_or(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,37) +#define mips_ori(c,dest,src1,uimm) mips_format_i(c,13,src1,dest,uimm) +#define mips_xor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,38) +#define mips_xori(c,dest,src1,uimm) mips_format_i(c,14,src1,dest,uimm) + +/* compares */ +#define mips_slt(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,42) +#define mips_slti(c,dest,src1,imm) mips_format_i(c,10,src1,dest,imm) +#define mips_sltiu(c,dest,src1,imm) mips_format_i(c,11,src1,dest,imm) +#define mips_sltu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,43) +/* missing traps: teq, teqi, tge, tgei, tgeiu, tgeu, tlt, tlti, tltiu, tltu, tne, tnei, */ + +/* conditional branches */ +#define mips_beq(c,src1,src2,offset) mips_format_i(c,4,src1,src2,offset) +#define mips_beql(c,src1,src2,offset) mips_format_i(c,20,src1,src2,offset) +#define mips_bgez(c,src1,offset) mips_format_i(c,1,src1,1,offset) +#define 
mips_bgezal(c,src1,offset) mips_format_i(c,1,src1,17,offset) +#define mips_bgezall(c,src1,offset) mips_format_i(c,1,src1,19,offset) +#define mips_bgezl(c,src1,offset) mips_format_i(c,1,src1,3,offset) +#define mips_bgtz(c,src1,offset) mips_format_i(c,7,src1,0,offset) +#define mips_bgtzl(c,src1,offset) mips_format_i(c,23,src1,0,offset) +#define mips_blez(c,src1,offset) mips_format_i(c,6,src1,0,offset) +#define mips_blezl(c,src1,offset) mips_format_i(c,22,src1,0,offset) +#define mips_bltz(c,src1,offset) mips_format_i(c,1,src1,0,offset) +#define mips_bltzal(c,src1,offset) mips_format_i(c,1,src1,16,offset) +#define mips_bltzall(c,src1,offset) mips_format_i(c,1,src1,18,offset) +#define mips_bltzl(c,src1,offset) mips_format_i(c,1,src1,2,offset) +#define mips_bne(c,src1,src2,offset) mips_format_i(c,5,src1,src2,offset) +#define mips_bnel(c,src1,src2,offset) mips_format_i(c,21,src1,src2,offset) + +/* uncond branches and calls */ +#define mips_jump(c,target) mips_format_j(c,2,target) +#define mips_jumpl(c,target) mips_format_j(c,3,target) +#define mips_jalr(c,src1,retreg) mips_format_r(c,0,src1,0,retreg,0,9) +#define mips_jr(c,src1) mips_emit32(c,((src1)<<21)|8) + +/* loads and stores */ +#define mips_lb(c,dest,base,offset) mips_format_i(c,32,base,dest,offset) +#define mips_lbu(c,dest,base,offset) mips_format_i(c,36,base,dest,offset) +#define mips_ld(c,dest,base,offset) mips_format_i(c,55,base,dest,offset) +#define mips_ldl(c,dest,base,offset) mips_format_i(c,26,base,dest,offset) +#define mips_ldr(c,dest,base,offset) mips_format_i(c,27,base,dest,offset) +#define mips_lh(c,dest,base,offset) mips_format_i(c,33,base,dest,offset) +#define mips_lhu(c,dest,base,offset) mips_format_i(c,37,base,dest,offset) +#define mips_ll(c,dest,base,offset) mips_format_i(c,48,base,dest,offset) +#define mips_lld(c,dest,base,offset) mips_format_i(c,52,base,dest,offset) +#define mips_lui(c,dest,base,uimm) mips_format_i(c,15,base,dest,uimm) +#define mips_lw(c,dest,base,offset) mips_format_i(c,35,base,dest,offset) +#define mips_lwl(c,dest,base,offset) mips_format_i(c,34,base,dest,offset) +#define mips_lwr(c,dest,base,offset) mips_format_i(c,38,base,dest,offset) +#define mips_lwu(c,dest,base,offset) mips_format_i(c,39,base,dest,offset) +#define mips_sb(c,src,base,offset) mips_format_i(c,40,base,src,offset) +#define mips_sc(c,src,base,offset) mips_format_i(c,56,base,src,offset) +#define mips_scd(c,src,base,offset) mips_format_i(c,60,base,src,offset) +#define mips_sd(c,src,base,offset) mips_format_i(c,63,base,src,offset) +#define mips_sdl(c,src,base,offset) mips_format_i(c,44,base,src,offset) +#define mips_sdr(c,src,base,offset) mips_format_i(c,45,base,src,offset) +#define mips_sh(c,src,base,offset) mips_format_i(c,41,base,src,offset) +#define mips_sw(c,src,base,offset) mips_format_i(c,43,base,src,offset) +#define mips_swl(c,src,base,offset) mips_format_i(c,50,base,src,offset) +#define mips_swr(c,src,base,offset) mips_format_i(c,54,base,src,offset) + +/* misc and coprocessor ops */ +#define mips_move(c,dest,src) mips_add(c,dest,src,mips_zero) +#define mips_nop(c) mips_sll(c,0,0,0) +#define mips_break(c,code) mips_emit32(c, ((code)<<6)|13) +#define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16) +#define mips_mflo(c,dest) mips_format_r(c,0,0,0,dest,0,18) +#define mips_mthi(c,src) mips_format_r(c,0,src,0,0,0,17) +#define mips_mtlo(c,src) mips_format_r(c,0,src,0,0,0,19) +#define mips_movn(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,11) +#define mips_movz(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,10) +#define 
mips_pref(c,hint,base,offset) mips_format_i(c,51,base,hint,offset) +#define mips_prefidx(c,hint,base,idx) mips_format_r(c,19,base,idx,hint,0,15) +#define mips_sync(c,stype) mips_emit32(c, ((stype)<<6)|15) +#define mips_syscall(c,code) mips_emit32(c, ((code)<<6)|12) + +#define mips_cop(c,cop,fun) mips_emit32(c, ((16|(cop))<<26)|(fun)) +#define mips_ldc(c,cop,dest,base,offset) mips_format_i(c,(52|(cop)),base,dest,offset) +#define mips_lwc(c,cop,dest,base,offset) mips_format_i(c,(48|(cop)),base,dest,offset) +#define mips_sdc(c,cop,src,base,offset) mips_format_i(c,(60|(cop)),base,src,offset) +#define mips_swc(c,cop,src,base,offset) mips_format_i(c,(56|(cop)),base,src,offset) +#define mips_cfc1(c,dest,src) mips_format_r(c,17,2,dest,src,0,0) +#define mips_ctc1(c,dest,src) mips_format_r(c,17,6,dest,src,0,0) + +/* fpu ops */ +#define mips_fabss(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,5) +#define mips_fabsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,5) +#define mips_fadds(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,0) +#define mips_faddd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,0) +#define mips_fdivs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,3) +#define mips_fdivd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,3) +#define mips_fmuls(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,2) +#define mips_fmuld(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,2) +#define mips_fnegs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,7) +#define mips_fnegd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,7) +#define mips_fsqrts(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,4) +#define mips_fsqrtd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,4) +#define mips_fsubs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,1) +#define mips_fsubd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,1) +#define mips_madds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_SINGLE) +#define mips_maddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_DOUBLE) +#define mips_nmadds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_SINGLE) +#define mips_nmaddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_DOUBLE) +#define mips_msubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_SINGLE) +#define mips_msubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_DOUBLE) +#define mips_nmsubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_SINGLE) +#define mips_nmsubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_DOUBLE) + +/* fp compare and branch */ +#define mips_fcmps(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,0,(3<<4)|(cond)) +#define mips_fcmpd(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,0,(3<<4)|(cond)) +#define mips_fbfalse(c,offset) mips_format_i(c,17,8,0,offset) +#define mips_fbfalsel(c,offset) mips_format_i(c,17,8,2,offset) +#define mips_fbtrue(c,offset) mips_format_i(c,17,8,1,offset) +#define mips_fbtruel(c,offset) mips_format_i(c,17,8,3,offset) + +/* fp convert */ +#define mips_ceills(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,10) +#define mips_ceilld(c,dest,src) 
mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,10) +#define mips_ceilws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,14) +#define mips_ceilwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,14) +#define mips_cvtds(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,33) +#define mips_cvtdw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,33) +#define mips_cvtdl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,33) +#define mips_cvtls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,37) +#define mips_cvtld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,37) +#define mips_cvtsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,32) +#define mips_cvtsw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,32) +#define mips_cvtsl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,32) +#define mips_cvtws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,36) +#define mips_cvtwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,36) +#define mips_floorls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,11) +#define mips_floorld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,11) +#define mips_floorws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,15) +#define mips_floorwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,15) +#define mips_roundls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,8) +#define mips_roundld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,8) +#define mips_roundws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,12) +#define mips_roundwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,12) +#define mips_truncls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,9) +#define mips_truncld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,9) +#define mips_truncws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,13) +#define mips_truncwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,13) + +/* fp moves, loads */ +#define mips_fmovs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,6) +#define mips_fmovd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,6) +#define mips_wmovfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0) +#define mips_wmovtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0) +#define mips_dmovfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0,0) +#define mips_dmovtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0,0) +#define mips_ldc1(c,dest,base,offset) mips_ldc(c,1,dest,base,offset) +#define mips_ldxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,1) +#define mips_lwc1(c,dest,base,offset) mips_lwc(c,1,dest,base,offset) +#define mips_lwxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,0) +#define mips_sdc1(c,src,base,offset) mips_sdc(c,1,src,base,offset) +#define mips_sdxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,9) +#define mips_swc1(c,src,base,offset) mips_swc(c,1,src,base,offset) +#define mips_swxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,8) + +#endif /* __MIPS_CODEGEN_H__ */ + diff --git a/mips/test.c b/mips/test.c new file mode 100644 index 0000000..d83f833 --- /dev/null +++ b/mips/test.c @@ -0,0 +1,149 @@ +#include "mips-codegen.h" +#include + +int main () { + unsigned int *code, * p; + + code = p = malloc (sizeof (int) * 1024); + + mips_add (p, 3, 4, 5); + mips_addi (p, 3, 4, 5); + mips_addu (p, 3, 4, 5); + mips_addiu (p, 3, 4, 5); + mips_sub (p, 3, 4, 5); + mips_subu (p, 3, 4, 5); + mips_dadd (p, 3, 4, 5); + 
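+	/* Illustrative encoding check (hand-derived from mips_format_r above):
+	 * the first mips_add (p, 3, 4, 5) in this file is R-format with op=0,
+	 * rs=4, rt=5, rd=3, sa=0, funct=32, i.e. (4<<21)|(5<<16)|(3<<11)|32
+	 * == 0x00851820, "add $v1, $a0, $a1" in assembler syntax. */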
mips_daddi (p, 3, 4, 5); + mips_daddu (p, 3, 4, 5); + mips_daddiu (p, 3, 4, 5); + mips_dsub (p, 3, 4, 5); + mips_dsubu (p, 3, 4, 5); + + mips_mult (p, 6, 7); + mips_multu (p, 6, 7); + mips_div (p, 6, 7); + mips_divu (p, 6, 7); + mips_dmult (p, 6, 7); + mips_dmultu (p, 6, 7); + mips_ddiv (p, 6, 7); + mips_ddivu (p, 6, 7); + + mips_sll (p, 3, 4, 5); + mips_sllv (p, 3, 4, 5); + mips_sra (p, 3, 4, 5); + mips_srav (p, 3, 4, 5); + mips_srl (p, 3, 4, 5); + mips_srlv (p, 3, 4, 5); + mips_dsll (p, 3, 4, 5); + mips_dsll32 (p, 3, 4, 5); + mips_dsllv (p, 3, 4, 5); + mips_dsra (p, 3, 4, 5); + mips_dsra32 (p, 3, 4, 5); + mips_dsrav (p, 3, 4, 5); + mips_dsrl (p, 3, 4, 5); + mips_dsrl32 (p, 3, 4, 5); + mips_dsrlv (p, 3, 4, 5); + + mips_and (p, 8, 9, 10); + mips_andi (p, 8, 9, 10); + mips_nor (p, 8, 9, 10); + mips_or (p, 8, 9, 10); + mips_ori (p, 8, 9, 10); + mips_xor (p, 8, 9, 10); + mips_xori (p, 8, 9, 10); + + mips_slt (p, 8, 9, 10); + mips_slti (p, 8, 9, 10); + mips_sltu (p, 8, 9, 10); + mips_sltiu (p, 8, 9, 10); + + mips_beq (p, 8, 9, 0xff1f); + mips_beql (p, 8, 9, 0xff1f); + mips_bne (p, 8, 9, 0xff1f); + mips_bnel (p, 8, 9, 0xff1f); + mips_bgez (p, 11, 0xff1f); + mips_bgezal (p, 11, 0xff1f); + mips_bgezall (p, 11, 0xff1f); + mips_bgezl (p, 11, 0xff1f); + mips_bgtz (p, 11, 0xff1f); + mips_bgtzl (p, 11, 0xff1f); + mips_blez (p, 11, 0xff1f); + mips_blezl (p, 11, 0xff1f); + mips_bltz (p, 11, 0xff1f); + mips_bltzal (p, 11, 0xff1f); + mips_bltzall (p, 11, 0xff1f); + mips_bltzl (p, 11, 0xff1f); + + mips_jump (p, 0xff1f); + mips_jumpl (p, 0xff1f); + mips_jalr (p, 12, mips_ra); + mips_jr (p, 12); + + mips_lb (p, 13, 14, 128); + mips_lbu (p, 13, 14, 128); + mips_ld (p, 13, 14, 128); + mips_ldl (p, 13, 14, 128); + mips_ldr (p, 13, 14, 128); + mips_lh (p, 13, 14, 128); + mips_lhu (p, 13, 14, 128); + mips_ll (p, 13, 14, 128); + mips_lld (p, 13, 14, 128); + mips_lui (p, 13, 14, 128); + mips_lw (p, 13, 14, 128); + mips_lwl (p, 13, 14, 128); + mips_lwr (p, 13, 14, 128); + mips_lwu (p, 13, 14, 128); + mips_sb (p, 13, 14, 128); + mips_sc (p, 13, 14, 128); + mips_scd (p, 13, 14, 128); + mips_sd (p, 13, 14, 128); + mips_sdl (p, 13, 14, 128); + mips_sdr (p, 13, 14, 128); + mips_sh (p, 13, 14, 128); + mips_sw (p, 13, 14, 128); + mips_swl (p, 13, 14, 128); + mips_swr (p, 13, 14, 128); + + mips_move (p, 15, 16); + mips_nop (p); + mips_break (p, 0); + mips_sync (p, 0); + mips_mfhi (p, 17); + mips_mflo (p, 17); + mips_mthi (p, 17); + mips_mtlo (p, 17); + + mips_fabsd (p, 16, 18); + mips_fnegd (p, 16, 18); + mips_fsqrtd (p, 16, 18); + mips_faddd (p, 16, 18, 20); + mips_fdivd (p, 16, 18, 20); + mips_fmuld (p, 16, 18, 20); + mips_fsubd (p, 16, 18, 20); + + mips_fcmpd (p, MIPS_FPU_EQ, 18, 20); + mips_fbfalse (p, 0xff1f); + mips_fbfalsel (p, 0xff1f); + mips_fbtrue (p, 0xff1f); + mips_fbtruel (p, 0xff1f); + + mips_ceilwd (p, 20, 22); + mips_ceilld (p, 20, 22); + mips_floorwd (p, 20, 22); + mips_floorld (p, 20, 22); + mips_roundwd (p, 20, 22); + mips_roundld (p, 20, 22); + mips_truncwd (p, 20, 22); + mips_truncld (p, 20, 22); + mips_cvtdw (p, 20, 22); + mips_cvtds (p, 20, 22); + mips_cvtdl (p, 20, 22); + mips_cvtld (p, 20, 22); + mips_cvtsd (p, 20, 22); + mips_cvtwd (p, 20, 22); + + mips_fmovd (p, 20, 22); + printf ("size: %d\n", p - code); + + return 0; +} -- cgit v1.1 From 66607f84556593e2c3aa39bba418801193b6fddf Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Sun, 18 Jan 2004 18:00:40 +0000 Subject: Apply patches from Neale Ferguson for s390 support svn path=/trunk/mono/; revision=22226 --- s390/tramp.c | 8 ++++---- 1 file 
changed, 4 insertions(+), 4 deletions(-) diff --git a/s390/tramp.c b/s390/tramp.c index c5ebbf9..912e8fe 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -757,7 +757,7 @@ emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) /*------------------------------------------------------------------*/ /* */ -/* Name - mono_create_trampoline. */ +/* Name - mono_arch_create_trampoline. */ /* */ /* Function - Create the code that will allow a mono method to */ /* invoke a system subroutine. */ @@ -765,7 +765,7 @@ emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) /*------------------------------------------------------------------*/ MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { guint8 *p, *code_buffer; size_data sz; @@ -796,7 +796,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /*------------------------------------------------------------------*/ /* */ -/* Name - mono_arch_create_method_pointer */ +/* Name - mono_arch_create_method_pointer */ /* */ /* Function - Returns a pointer to a native function that can */ /* be used to call the specified method. */ @@ -811,7 +811,7 @@ mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /* */ /* Logic: */ /* ------ */ -/* mono_arch_create_method_pointer (MonoMethod *method) */ +/* mono_arch_create_method_pointer (MonoMethod *method) */ /* create the unmanaged->managed wrapper */ /* register it with mono_jit_info_table_add() */ /* */ -- cgit v1.1 From bb16201aaa018434f551c2657d9e38f28dfe8904 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 2 Feb 2004 15:56:15 +0000 Subject: 2004-02-02 Zoltan Varga * sparc/tramp.c: Implement all floating point argument passing conventions in Sparc V8. Also fix structure passing in V8. svn path=/trunk/mono/; revision=22704 --- ChangeLog | 4 ++ sparc/tramp.c | 120 +++++++++++++++++++++++++++++++++++++++++----------------- 2 files changed, 89 insertions(+), 35 deletions(-) diff --git a/ChangeLog b/ChangeLog index 1dadd04..3343afa 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-02-02 Zoltan Varga + + * sparc/tramp.c: Implement all floating point argument passing conventions in + Sparc V8. Also fix structure passing in V8. 
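On V8, float and double arguments travel in the integer out registers and stack slots, so the interesting part of this patch is narrowing the stackval's double to single precision and bouncing it through memory to reach an integer register. The in-register case reduces to this sequence (a sketch distilled from the hunk below, not additional patch code):

    sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0);       /* load the double    */
    sparc_fdtos (p, sparc_f0, sparc_f0);                      /* narrow to single   */
    sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos);     /* spill to the stack */
    sparc_ld_imm (p, sparc_sp, stack_par_pos, sparc_o0 + gr); /* reload as integer  */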
Thu Nov 13 16:24:29 CET 2003 Paolo Molaro diff --git a/sparc/tramp.c b/sparc/tramp.c index 0bf55c1..3b8a42d 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -166,6 +166,9 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, case MONO_TYPE_R4: #if SPARCV9 (*code_size) += 4; /* for the fdtos */ +#else + (*code_size) += 12; + (*stack_size) += 4; #endif case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: @@ -191,7 +194,11 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, goto enum_calc_size; } size = mono_class_native_size (sig->params[i]->data.klass, NULL); +#if SPARCV9 if (size != 4) { +#else + if (1) { +#endif DEBUG(fprintf(stderr, "copy %d byte struct on stack\n", size)); *use_memcpy = TRUE; *code_size += 8*4; @@ -408,7 +415,11 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, gint size; size = mono_class_native_size (sig->params[i]->data.klass, NULL); +#if SPARCV9 if (size != 4) { +#else + if (1) { +#endif /* need to call memcpy here */ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); sparc_ld_imm_ptr (p, sparc_i3, i*16, sparc_o1); @@ -458,25 +469,44 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, case MONO_TYPE_CHAR: case MONO_TYPE_I4: case MONO_TYPE_U4: -#if !SPARCV9 + if (gr < OUT_REGS) { + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); + gr++; + } else { + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; + } + break; + case MONO_TYPE_R4: -#endif +#if SPARCV9 + sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f30); /* fix using this fixed reg */ + sparc_fdtos(p, sparc_f30, sparc_f0 + 2 * gr + 1); + gr++; + break; +#else + /* Convert from double to single */ + sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0); + sparc_fdtos (p, sparc_f0, sparc_f0); + + /* + * FIXME: Is there an easier way to do an + * freg->ireg move ? + */ + sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos); + if (gr < OUT_REGS) { - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); + sparc_ld_imm (p, sparc_sp, stack_par_pos, sparc_o0 + gr); gr++; } else { - sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); - sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); + sparc_ldf_imm (p, sparc_sp, stack_par_pos, sparc_f0); + sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos); stack_par_pos += SLOT_SIZE; } break; -#if SPARCV9 - case MONO_TYPE_R4: - sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f30); /* fix using this fixed reg */ - sparc_fdtos(p, sparc_f30, sparc_f0 + 2 * gr + 1); - gr++; - break; #endif + case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: @@ -504,7 +534,15 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); stack_par_pos += SLOT_SIZE; } + break; + } #else + /* + * FIXME: The 32bit ABI docs do not mention that small + * structures are passed in registers. 
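+	 * (The SysV SPARC 32-bit ABI, as commonly implemented, passes all
+	 * aggregates by reference: the caller builds a copy and passes its
+	 * address, which is exactly what the fallback that follows does, so
+	 * the 4-byte special case stays disabled until the docs are clarified.)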
+ */ + + /* if (size == 4) { if (gr < OUT_REGS) { sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); @@ -516,22 +554,24 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos); stack_par_pos += SLOT_SIZE; } + break; + } + */ #endif - } else { - if (gr < OUT_REGS) { - sparc_add_imm (p, 0, sparc_sp, + + if (gr < OUT_REGS) { + sparc_add_imm (p, 0, sparc_sp, cur_struct_pos, sparc_o0 + gr); - gr ++; - } else { - sparc_ld_imm_ptr (p, sparc_sp, - cur_struct_pos, - sparc_l1); - sparc_st_imm_ptr (p, sparc_l1, - sparc_sp, - stack_par_pos); - } - cur_struct_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1)); + gr ++; + } else { + sparc_ld_imm_ptr (p, sparc_sp, + cur_struct_pos, + sparc_l1); + sparc_st_imm_ptr (p, sparc_l1, + sparc_sp, + stack_par_pos); } + cur_struct_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1)); break; } @@ -552,22 +592,30 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, #else case MONO_TYPE_I8: case MONO_TYPE_R8: - /* this will break in subtle ways... */ - if (gr < 5) { + if (gr < (OUT_REGS - 1)) { sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); gr ++; - if (gr >= OUT_REGS) { - NOT_IMPL("split reg/stack") - break; - } else { - sparc_ld_imm (p, ARG_BASE, - (i*ARG_SIZE) + 4, - sparc_o0 + gr); - } + sparc_ld_imm (p, ARG_BASE, + (i*ARG_SIZE) + 4, + sparc_o0 + gr); gr ++; + } else if (gr == (OUT_REGS - 1)) { + /* Split register/stack */ + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); + gr ++; + + sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0); + sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; } else { - NOT_IMPL("FIXME: I8/R8 on stack"); + sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0); + sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; + + sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0); + sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos); + stack_par_pos += SLOT_SIZE; } break; #endif @@ -576,6 +624,8 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, } } + g_assert ((stack_par_pos - BIAS) <= stack_size); + return p; } -- cgit v1.1 From f9f3c20b070f92bcf6f85f5bd68a24c3434fe6c4 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 19 Feb 2004 14:13:23 +0000 Subject: 2004-02-19 Zoltan Varga * sparc/tramp.c: Fix alignment of structures containing doubles. svn path=/trunk/mono/; revision=23247 --- ChangeLog | 4 ++++ sparc/tramp.c | 16 +++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3343afa..affe94d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-02-19 Zoltan Varga + + * sparc/tramp.c: Fix alignment of structures containing doubles. 
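The fix below repeatedly applies the standard power-of-two round-up idiom, (x + (align - 1)) & ~(align - 1), to stack and struct offsets. As a worked example (illustrative, not part of the patch):

    /* round x up to a multiple of align (align must be a power of two) */
    #define ALIGN_UP(x,align) (((x) + ((align) - 1)) & ~((align) - 1))
    /* ALIGN_UP (13, 8) == 16,  ALIGN_UP (16, 8) == 16,  ALIGN_UP (0, 8) == 0 */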
+ 2004-02-02 Zoltan Varga * sparc/tramp.c: Implement all floating point argument passing conventions in diff --git a/sparc/tramp.c b/sparc/tramp.c index 3b8a42d..ca6dd08 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -189,11 +189,12 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, break; case MONO_TYPE_VALUETYPE: { gint size; + guint32 align; if (sig->params[i]->data.klass->enumtype) { simpletype = sig->params[i]->data.klass->enum_basetype->type; goto enum_calc_size; } - size = mono_class_native_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (sig->params[i]->data.klass, &align); #if SPARCV9 if (size != 4) { #else @@ -202,6 +203,8 @@ calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, DEBUG(fprintf(stderr, "copy %d byte struct on stack\n", size)); *use_memcpy = TRUE; *code_size += 8*4; + + *stack_size = (*stack_size + (align - 1)) & (~(align -1)); *stack_size += (size + 3) & (~3); if (gr > OUT_REGS) { *code_size += 4; @@ -413,13 +416,16 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, if (sig->params[i]->type == MONO_TYPE_VALUETYPE && !sig->params[i]->data.klass->enumtype) { gint size; + guint32 align; - size = mono_class_native_size (sig->params[i]->data.klass, NULL); + size = mono_class_native_size (sig->params[i]->data.klass, &align); #if SPARCV9 if (size != 4) { #else if (1) { #endif + /* Add alignment */ + stack_par_pos = (stack_par_pos + (align - 1)) & (~(align - 1)); /* need to call memcpy here */ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0); sparc_ld_imm_ptr (p, sparc_i3, i*16, sparc_o1); @@ -518,12 +524,13 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, break; case MONO_TYPE_VALUETYPE: { gint size; + guint32 align; MonoClass *klass = sig->params[i]->data.klass; if (klass->enumtype) { simpletype = klass->enum_basetype->type; goto enum_calc_size; } - size = mono_class_native_size (klass, NULL); + size = mono_class_native_size (klass, &align); #if SPARCV9 if (size <= 16) { if (gr < OUT_REGS) { @@ -559,6 +566,7 @@ emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size, */ #endif + cur_struct_pos = (cur_struct_pos + (align - 1)) & (~(align - 1)); if (gr < OUT_REGS) { sparc_add_imm (p, 0, sparc_sp, cur_struct_pos, sparc_o0 + gr); @@ -957,6 +965,8 @@ mono_arch_create_method_pointer (MonoMethod *method) } /* return value storage */ + /* Align to dword */ + stackval_arg_pos = (stackval_arg_pos + (8 - 1)) & (~(8 -1)); if (sig->param_count) { sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0); } -- cgit v1.1 From 5d0cafa77c2cd95cb92a2990184bac64ec287016 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 19 Feb 2004 14:14:37 +0000 Subject: 2004-02-19 Zoltan Varga * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones. svn path=/trunk/mono/; revision=23248 --- ChangeLog | 2 ++ sparc/sparc-codegen.h | 79 ++++++++++++++++++++++++++++++++------------------- 2 files changed, 52 insertions(+), 29 deletions(-) diff --git a/ChangeLog b/ChangeLog index affe94d..b6fd091 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2004-02-19 Zoltan Varga + * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones. + * sparc/tramp.c: Fix alignment of structures containing doubles. 
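Among this patch's additions are range predicates used when choosing between the immediate and register forms of instructions; sparc_is_imm13 accepts exactly the signed 13-bit immediate range. Illustrative usage:

    sparc_is_imm13 (4095);  /* true: largest positive imm13 */
    sparc_is_imm13 (-4096); /* true: most negative imm13    */
    sparc_is_imm13 (4096);  /* false: needs a sethi/or pair */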
2004-02-02 Zoltan Varga diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 01c1529..e965d25 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -272,6 +272,14 @@ typedef struct { /* for use in logical ops, use 0 to not set flags */ #define sparc_cc 16 +#define sparc_is_imm13(val) ((gint)val >= (gint)-(1<<12) && (gint)val <= (gint)((1<<12)-1)) +#define sparc_is_imm22(val) ((gint)val >= (gint)-(1<<21) && (gint)val <= (gint)((1<<21)-1)) + +/* disassembly */ +#define sparc_inst_op(inst) ((inst) >> 30) +#define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f) +#define sparc_inst_imm(inst) (((inst) >> 13) & 0x1) + #define sparc_encode_call(ins,addr) \ do { \ sparc_format1 *__f = (sparc_format1*)(ins); \ @@ -384,10 +392,9 @@ typedef struct { #define sparc_ld(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),0,(dest)) #define sparc_ld_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),0,(dest)) -#if SPARCV9 +/* Sparc V9 */ #define sparc_ldx(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),11,(dest)) #define sparc_ldx_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),11,(dest)) -#endif #define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest)) #define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest)) @@ -408,10 +415,9 @@ typedef struct { #define sparc_st(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),4,(src)) #define sparc_st_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),4,(src)) -#if SPARCV9 +/* Sparc V9 */ #define sparc_stx(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),14,(src)) #define sparc_stx_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),14,(src)) -#endif #define sparc_std(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),7,(src)) #define sparc_std_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),7,(src)) @@ -444,7 +450,7 @@ typedef struct { #define sparc_jmpl(ins,base,disp,dest) sparc_encode_format3a((ins),2,0,(base),(disp),56,(dest)) #define sparc_jmpl_imm(ins,base,disp,dest) sparc_encode_format3b((ins),2,(base),(disp),56,(dest)) -#define sparc_call_simple(ins,addr) sparc_encode_call((ins),((unsigned int)(addr)>>2)) +#define sparc_call_simple(ins,disp) sparc_encode_call((ins),((unsigned int)(disp))) #define sparc_rdy(ins,dest) sparc_encode_format3a((ins),2,0,0,0,40,(dest)) @@ -460,6 +466,8 @@ typedef struct { /* trap */ +#define sparc_ta(ins,tt) sparc_encode_format3b((ins),2,0,(tt),58,0x8) + /* alu fop */ /* provide wrappers for: fitos, fitod, fstoi, fdtoi, fstod, fdtos, fmov, fneg, fabs */ @@ -521,8 +529,15 @@ typedef struct { #define sparc_fcmpeq( ins, r1, r2 ) sparc_fcmpeq( ins, r1, sparc_fcmpeq_val, r2 ) /* logical */ -#define sparc_and(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|1,(dest)) -#define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|1,(dest)) + +/* FIXME: condense this using macros */ +/* FIXME: the setcc stuff is wrong in lots of places */ + +#define sparc_logic(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),((setcc) ? 0x10 : 0) | (op), (dest)) +#define sparc_logic_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),((setcc) ? 
0x10 : 0) | (op), (dest)) + +#define sparc_and(ins,setcc,r1,r2,dest) sparc_logic(ins,1,setcc,r1,r2,dest) +#define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_logic_imm(ins,1,setcc,r1,imm,dest) #define sparc_andn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|5,(dest)) #define sparc_andn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|5,(dest)) @@ -534,7 +549,7 @@ typedef struct { #define sparc_orn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|6,(dest)) #define sparc_xor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|3,(dest)) -#define sparc_xor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(r2),(imm)|3,(dest)) +#define sparc_xor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm), (setcc)|3,(dest)) #define sparc_xnor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|7,(dest)) #define sparc_xnor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|7,(dest)) @@ -543,49 +558,51 @@ typedef struct { #define sparc_sll(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),37,(dest)) #define sparc_sll_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),37,(dest)) -#if SPARCV9 +/* Sparc V9 */ #define sparc_sllx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),37,(dest)) #define sparc_sllx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),37,(dest)) -#endif #define sparc_srl(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),38,(dest)) #define sparc_srl_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),38,(dest)) -#if SPARCV9 +/* Sparc V9 */ #define sparc_srlx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),38,(dest)) #define sparc_srlx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),38,(dest)) -#endif #define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest)) #define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest)) /* alu */ -#define sparc_add(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|0,(dest)) -#define sparc_add_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|0,(dest)) -#define sparc_addx(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|8,(dest)) -#define sparc_addx_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|8,(dest)) +#define sparc_alu_reg(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),op|((setcc) ? 0x10 : 0),(dest)) +#define sparc_alu_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),op|((setcc) ? 
0x10 : 0),(dest)) + +#define sparc_add(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0,(setcc),(r1),(r2),(dest)) +#define sparc_add_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0,(setcc),(r1),(imm),(dest)) -#define sparc_sub(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|4,(dest)) -#define sparc_sub_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|4,(dest)) +#define sparc_addx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x8,(setcc),(r1),(r2),(dest)) +#define sparc_addx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x8,(setcc),(r1),(imm),(dest)) -#define sparc_subx(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|12,(dest)) -#define sparc_subx_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|12,(dest)) +#define sparc_sub(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x4,(setcc),(r1),(r2),(dest)) +#define sparc_sub_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x4,(setcc),(r1),(imm),(dest)) + +#define sparc_subx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xc,(setcc),(r1),(r2),(dest)) +#define sparc_subx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xc,(setcc),(r1),(imm),(dest)) #define sparc_muls(ins,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),36,(dest)) #define sparc_muls_imm(ins,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),36,(dest)) -#define sparc_umul(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|10,(dest)) -#define sparc_umul_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|10,(dest)) +#define sparc_umul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xa,(setcc),(r1),(r2),(dest)) +#define sparc_umul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xa,(setcc),(r1),(imm),(dest)) -#define sparc_smul(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|11,(dest)) -#define sparc_smul_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|11,(dest)) +#define sparc_smul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xb,(setcc),(r1),(r2),(dest)) +#define sparc_smul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xb,(setcc),(r1),(imm),(dest)) -#define sparc_udiv(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|14,(dest)) -#define sparc_udiv_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|14,(dest)) +#define sparc_udiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xe,(setcc),(r1),(r2),(dest)) +#define sparc_udiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xe,(setcc),(r1),(imm),(dest)) -#define sparc_sdiv(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|15,(dest)) -#define sparc_sdiv_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|15,(dest)) +#define sparc_sdiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xf,(setcc),(r1),(r2),(dest)) +#define sparc_sdiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xf,(setcc),(r1),(imm),(dest)) /* branch */ @@ -607,9 +624,13 @@ typedef struct { #define sparc_retl(ins) sparc_jmpl_imm((ins),sparc_o7,8,sparc_g0) #define sparc_restore_simple(ins) sparc_restore((ins),sparc_g0,sparc_g0,sparc_g0) +#define SPARC_SET_MAX_SIZE 8 + #define sparc_set(ins,val,reg) \ do { \ - if (((guint32)(val) & 0x1fff) == 0) \ + if ((val) == 0) \ + sparc_clr_reg((ins),(reg)); \ + else if (((guint32)(val) & 0x1fff) == 0) \ sparc_sethi((ins),(guint32)(val),(reg)); \ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \ -- cgit 
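A short usage sketch for the sparc_set synthetic-instruction macro above (illustrative; it exercises only the branches visible in this hunk, and assumes p is a guint32 code cursor):

    guint32 code [4], *p = code;
    sparc_set (p, 0, sparc_o0);          /* val == 0          -> clr %o0                    */
    sparc_set (p, 4095, sparc_o1);       /* fits in imm13     -> or %g0, 4095, %o1          */
    sparc_set (p, 0x40000000, sparc_o2); /* low 13 bits clear -> sethi %hi(0x40000000), %o2 */

SPARC_SET_MAX_SIZE is 8 presumably because the worst case, when neither shortcut applies, is a two-instruction sethi/or pair.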
v1.1

From 7fd6186b66f081ef6c0fca7708ddf8a641a09eae Mon Sep 17 00:00:00 2001
From: Miguel de Icaza
Date: Tue, 24 Feb 2004 18:01:50 +0000
Subject: Add amd64 support patch from Zalman Stern

svn path=/trunk/mono/; revision=23411
---
 amd64/Makefile.am     |    7 +
 amd64/amd64-codegen.h |  409 +++++++++++++++++++
 amd64/tramp.c         | 1055 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1471 insertions(+)
 create mode 100644 amd64/Makefile.am
 create mode 100644 amd64/amd64-codegen.h
 create mode 100644 amd64/tramp.c

diff --git a/amd64/Makefile.am b/amd64/Makefile.am
new file mode 100644
index 0000000..54499b5
--- /dev/null
+++ b/amd64/Makefile.am
@@ -0,0 +1,7 @@
+
+INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-amd64.la
+
+libmonoarch_amd64_la_SOURCES = tramp.c amd64-codegen.h
+
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
new file mode 100644
index 0000000..68bcfec
--- /dev/null
+++ b/amd64/amd64-codegen.h
@@ -0,0 +1,409 @@
+/*
+ * amd64-codegen.h: Macros for generating amd64 code
+ *
+ * Authors:
+ *   Paolo Molaro (lupus@ximian.com)
+ *   Intel Corporation (ORP Project)
+ *   Sergey Chaban (serge@wildwestsoftware.com)
+ *   Dietmar Maurer (dietmar@ximian.com)
+ *   Patrik Torstensson
+ *   Zalman Stern
+ *
+ * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal.
+ *
+ * Copyright (C) 2000 Intel Corporation.  All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef AMD64_H
+#define AMD64_H
+
+typedef enum {
+	AMD64_RAX = 0,
+	AMD64_RCX = 1,
+	AMD64_RDX = 2,
+	AMD64_RBX = 3,
+	AMD64_RSP = 4,
+	AMD64_RBP = 5,
+	AMD64_RSI = 6,
+	AMD64_RDI = 7,
+	AMD64_R8 = 8,
+	AMD64_R9 = 9,
+	AMD64_R10 = 10,
+	AMD64_R11 = 11,
+	AMD64_R12 = 12,
+	AMD64_R13 = 13,
+	AMD64_R14 = 14,
+	AMD64_R15 = 15,
+	AMD64_NREG
+} AMD64_Reg_No;
+
+typedef enum {
+	AMD64_XMM0 = 0,
+	AMD64_XMM1 = 1,
+	AMD64_XMM2 = 2,
+	AMD64_XMM3 = 3,
+	AMD64_XMM4 = 4,
+	AMD64_XMM5 = 5,
+	AMD64_XMM6 = 6,
+	AMD64_XMM7 = 7,
+	AMD64_XMM8 = 8,
+	AMD64_XMM9 = 9,
+	AMD64_XMM10 = 10,
+	AMD64_XMM11 = 11,
+	AMD64_XMM12 = 12,
+	AMD64_XMM13 = 13,
+	AMD64_XMM14 = 14,
+	AMD64_XMM15 = 15,
+	AMD64_XMM_NREG = 16
+} AMD64_XMM_Reg_No;
+
+typedef enum
+{
+	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+	AMD64_REX_W = 8  /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} AMD64_REX_Bits;
+
+#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits)))
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \
+	{ \
+		unsigned char _amd64_rex_bits = \
+			(((width) > 4) ? AMD64_REX_W : 0) | \
+			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
+			(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+		if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+	}
+
+typedef union {
+	long val;
+	unsigned char b [8];
+} amd64_imm_buf;
+
+#include "../x86/x86-codegen.h"
+
+
+/* Need to fill this info in for amd64.
*/ + +#if 0 +/* +// bitvector mask for callee-saved registers +*/ +#define X86_ESI_MASK (1< +#include +#include "amd64-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" +#include "mono/metadata/marshal.h" + +/* + * The resulting function takes the form: + * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); + */ +#define FUNC_ADDR_POS 8 +#define RETVAL_POS 12 +#define THIS_POS 16 +#define ARGP_POS 20 +#define LOC_POS -4 + +#define ARG_SIZE sizeof (stackval) + +#define MAX_INT_ARG_REGS 6 +#define MAX_FLOAT_ARG_REGS 8 + +// TODO get these right. They are upper bounds anyway, so it doesn't much matter. +#define PUSH_INT_STACK_ARG_SIZE 16 +#define MOVE_INT_REG_ARG_SIZE 16 +#define PUSH_FLOAT_STACK_ARG_SIZE 16 +#define MOVE_FLOAT_REG_ARG_SIZE 16 +#define COPY_STRUCT_STACK_ARG_SIZE 16 + +/* Maps an argument number (starting at 0) to the register it is passed in (if it fits). + * E.g. int foo(int bar, int quux) has the foo arg in RDI and the quux arg in RSI + * There is no such map for floating point args as they go in XMM0-XMM7 in order and thus the + * index is the register number. + */ +static int int_arg_regs[] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 }; + +/* This next block of code resolves the ABI rules for passing structures in the argument registers. + * These basically amount to "Use up to two registers if they are all integer or all floating point. + * If the structure is bigger than two registers or would be in one integer register and one floating point, + * it is passed in memory instead. + * + * It is possible this code needs to be recursive to be correct in the case when one of the structure members + * is itself a structure. + * + * The 80-bit floating point stuff is ignored. 
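+ *
+ * Illustrative examples under these rules (not the full psABI recursion):
+ * struct { int a; int b; } goes in integer registers, struct { float x;
+ * float y; } goes in SSE registers, while struct { int a; float b; },
+ * anything larger than 8 bytes, or anything with more than two fields
+ * falls back to memory.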
+ */
+typedef enum {
+	ARG_IN_MEMORY,
+	ARG_IN_INT_REGS,
+	ARG_IN_FLOAT_REGS
+} struct_arg_type;
+
+static struct_arg_type compute_arg_type(MonoType *type)
+{
+	guint32 simpletype = type->type;
+
+	switch (simpletype) {
+	case MONO_TYPE_BOOLEAN:
+	case MONO_TYPE_CHAR:
+	case MONO_TYPE_I1:
+	case MONO_TYPE_U1:
+	case MONO_TYPE_I2:
+	case MONO_TYPE_U2:
+	case MONO_TYPE_I4:
+	case MONO_TYPE_U4:
+	case MONO_TYPE_I:
+	case MONO_TYPE_U:
+	case MONO_TYPE_PTR:
+	case MONO_TYPE_SZARRAY:
+	case MONO_TYPE_CLASS:
+	case MONO_TYPE_OBJECT:
+	case MONO_TYPE_STRING:
+	case MONO_TYPE_I8:
+		return ARG_IN_INT_REGS;
+		break;
+	case MONO_TYPE_VALUETYPE: {
+		if (type->data.klass->enumtype)
+			return ARG_IN_INT_REGS;
+		return ARG_IN_MEMORY;
+		break;
+	}
+	case MONO_TYPE_R4:
+	case MONO_TYPE_R8:
+		return ARG_IN_FLOAT_REGS;
+		break;
+	default:
+		g_error ("Can't trampoline 0x%x", type->type);
+	}
+
+	return ARG_IN_MEMORY;
+}
+
+static struct_arg_type value_type_info(MonoClass *klass, int *native_size, int *regs_used, int *offset1, int *size1, int *offset2, int *size2)
+{
+	MonoMarshalType *info = mono_marshal_load_type_info (klass);
+
+	*native_size = info->native_size;
+
+	if (info->native_size > 8 || info->num_fields > 2)
+	{
+		*regs_used = 0;
+		*offset1 = -1;
+		*offset2 = -1;
+		return ARG_IN_MEMORY;
+	}
+
+	if (info->num_fields == 1)
+	{
+		struct_arg_type result = compute_arg_type(info->fields[0].field->type);
+		if (result != ARG_IN_MEMORY)
+		{
+			*regs_used = 1;
+			*offset1 = info->fields[0].offset;
+			*size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1);
+		}
+		else
+		{
+			*regs_used = 0;
+			*offset1 = -1;
+		}
+
+		*offset2 = -1;
+		return result;
+	}
+
+	struct_arg_type result1 = compute_arg_type(info->fields[0].field->type);
+	struct_arg_type result2 = compute_arg_type(info->fields[1].field->type);
+
+	if (result1 == result2 && result1 != ARG_IN_MEMORY)
+	{
+		*regs_used = 2;
+		*offset1 = info->fields[0].offset;
+		*size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1);
+		*offset2 = info->fields[1].offset;
+		*size2 = mono_marshal_type_size (info->fields[1].field->type, info->fields[1].mspec, NULL, 1, 1);
+		return result1;
+	}
+
+	return ARG_IN_MEMORY;
+}
+
+MonoPIFunc
+mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+	unsigned char *p, *code_buffer;
+	guint32 stack_size = 0, code_size = 50;
+	guint32 arg_pos, simpletype;
+	int i;
+	static GHashTable *cache = NULL;
+	MonoPIFunc res;
+
+	guint32 int_arg_regs_used = 0;
+	guint32 float_arg_regs_used = 0;
+	guint32 next_int_arg_reg = 0;
+	guint32 next_float_arg_reg = 0;
+	/* Indicates that the return value is filled in inside the called function. */
+	int retval_implicit = 0;
+	char *arg_in_reg_bitvector; /* A set indexed by argument number saying if it is in a register
+				       (integer or floating point according to type) */
+
+	if (!cache)
+		cache = g_hash_table_new ((GHashFunc)mono_signature_hash,
+				(GCompareFunc)mono_metadata_signature_equal);
+
+	if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig)))
+		return res;
+
+	if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) {
+		int_arg_regs_used++;
+		code_size += MOVE_INT_REG_ARG_SIZE;
+	}
+
+	if (sig->hasthis) {
+		int_arg_regs_used++;
+		code_size += MOVE_INT_REG_ARG_SIZE;
+	}
+
+	/* Run through stuff to calculate code size and argument bytes that will be pushed on stack (stack_size).
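+	 * (This first pass only sizes things: it accumulates code_size and
+	 * stack_size so the code buffer can be allocated up front; the actual
+	 * instructions are emitted by a second pass over the same argument
+	 * list further down.)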
*/ + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + if (int_arg_regs_used++ > MAX_INT_ARG_REGS) { + stack_size += 8; + code_size += PUSH_INT_STACK_ARG_SIZE; + } + else + code_size += MOVE_INT_REG_ARG_SIZE; + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + if (arg_type == ARG_IN_INT_REGS && + (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) + { + code_size += MOVE_INT_REG_ARG_SIZE; + int_arg_regs_used += regs_used; + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) + { + code_size += MOVE_FLOAT_REG_ARG_SIZE; + float_arg_regs_used += regs_used; + break; + } + + /* Else item is in memory. */ + + stack_size += size + 7; + stack_size &= ~7; + code_size += COPY_STRUCT_STACK_ARG_SIZE; + + break; + } + case MONO_TYPE_R4: + case MONO_TYPE_R8: + if (float_arg_regs_used++ > MAX_FLOAT_ARG_REGS) { + stack_size += 8; + code_size += PUSH_FLOAT_STACK_ARG_SIZE; + } + else + code_size += MOVE_FLOAT_REG_ARG_SIZE; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + /* + * FIXME: take into account large return values. + * (Comment carried over from IA32 code. Not sure what it means :-) + */ + + code_buffer = p = alloca (code_size); + + /* + * Standard function prolog. + */ + amd64_push_reg (p, AMD64_RBP); + amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); + /* + * and align to 16 byte boundary... + */ + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + retval_implicit = 1; + } + } + + if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { + /* Push the retval register so it is saved across the call. It will be addressed via RBP later. */ + amd64_push_reg (p, AMD64_RSI); + stack_size += 8; + } + + /* Ensure stack is 16 byte aligned when entering called function as required by calling convention. + * Getting this wrong results in a general protection fault on an SSE load or store somewhere in the + * code called under the trampoline. + */ + if ((stack_size & 15) != 0) + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 16 - (stack_size & 15)); + + /* + * On entry to generated function: + * RDI has target function address + * RSI has return value location address + * RDX has this pointer address + * RCX has the pointer to the args array. + * + * Inside the stub function: + * R10 holds the pointer to the args + * R11 holds the target function address. + * The return value address is pushed on the stack. + * The this pointer is moved into the first arg register at the start. 
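+ * For example, for a hypothetical call with two integer args, no this pointer
+ * and no implicit return value, the stub boils down to:
+ *	R10 <- RCX (args), R11 <- RDI (callme),
+ *	RDI <- args[0], RSI <- args[1], call *R11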
+ * + * Optimization note: we could keep the args pointer in RCX and then + * load over itself at the end. Ditto the callee addres could be left in RDI in some cases. + */ + + /* Move args pointer to temp register. */ + amd64_mov_reg_reg (p, AMD64_R10, AMD64_RCX, 8); + amd64_mov_reg_reg (p, AMD64_R11, AMD64_RDI, 8); + + /* First args register gets return value pointer, if need be. + * Note that "byref" equal true means the called function returns a pointer. + */ + if (retval_implicit) { + amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RSI, 8); + next_int_arg_reg++; + } + + /* this pointer goes in next args register. */ + if (sig->hasthis) { + amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RDX, 8); + next_int_arg_reg++; + } + + /* + * Generate code to handle arguments in registers. Stack arguments will happen in a loop after this. + */ + arg_in_reg_bitvector = (char *)alloca((sig->param_count + 7) / 8); + memset(arg_in_reg_bitvector, 0, (sig->param_count + 7) / 8); + + /* First, load all the arguments that are passed in registers into the appropriate registers. + * Below there is another loop to handle arguments passed on the stack. + */ + for (i = 0; i < sig->param_count; i++) { + arg_pos = ARG_SIZE * i; + + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_marshal: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_CLASS: + if (next_int_arg_reg < MAX_INT_ARG_REGS) { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos, 8); + next_int_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_R4: + if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { + amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); + next_float_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_R8: + if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); + next_float_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_VALUETYPE: { + if (!sig->params [i]->data.klass->enumtype) { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS && + (next_int_arg_reg + regs_used) <= MAX_INT_ARG_REGS) + { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset1, size1); + next_int_arg_reg++; + if (regs_used > 1) + { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset2, size2); + next_int_arg_reg++; + } + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (next_float_arg_reg + regs_used) <= MAX_FLOAT_ARG_REGS) + { + if (size1 == 4) + amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); + else + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); + next_float_arg_reg++; + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_reg_membase (p, 
next_float_arg_reg, AMD64_R10, arg_pos + offset2); + else + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset2); + next_float_arg_reg++; + } + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + break; + } + + /* Structs in memory are handled in the next loop. */ + } else { + /* it's an enum value */ + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_marshal; + } + break; + } + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + /* Handle stack arguments, pushing the rightmost argument first. */ + for (i = sig->param_count; i > 0; --i) { + arg_pos = ARG_SIZE * (i - 1); + if (sig->params [i - 1]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i - 1]->type; +enum_marshal2: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_CLASS: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_R4: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_R8: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_VALUETYPE: + if (!sig->params [i - 1]->data.klass->enumtype) { + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) + { + int ss = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); + ss += 7; + ss &= ~7; + + amd64_alu_reg_imm(p, X86_SUB, AMD64_RSP, ss); + /* Count register */ + amd64_mov_reg_imm(p, AMD64_RCX, ss); + /* Source register */ + amd64_lea_membase(p, AMD64_RSI, AMD64_R10, arg_pos); + /* Dest register */ + amd64_mov_reg_reg(p, AMD64_RDI, AMD64_RSP, 8); + + /* AMD64 calling convention guarantees direction flag is clear at call boundary. */ + x86_prefix(p, AMD64_REX(AMD64_REX_W)); + x86_prefix(p, X86_REP_PREFIX); + x86_movsb(p); + } + } else { + /* it's an enum value */ + simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; + goto enum_marshal2; + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); + } + } + + /* TODO: Set RAL to number of XMM registers used in case this is a varags function? */ + + /* + * Insert call to function + */ + amd64_call_reg (p, AMD64_R11); + + if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { + amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, 8); + } + /* + * Handle retval. + * Small integer and pointer values are in EAX. + * Long integers are in EAX:EDX. + * FP values are on the FP stack. 
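+ * (The three lines above are carried over from the IA32 version; in the
+ * AMD64 code below, integers and pointers come back in RAX, R4/R8 values
+ * come back in XMM0, and two-register structs in RAX:RDX or XMM0:XMM1.)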
+ */ + + if (sig->ret->byref || string_ctor) { + simpletype = MONO_TYPE_PTR; + } else { + simpletype = sig->ret->type; + } + enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 1); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 2); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + case MONO_TYPE_PTR: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); + break; + case MONO_TYPE_R4: + amd64_movss_regp_reg (p, AMD64_RSI, AMD64_XMM0); + break; + case MONO_TYPE_R8: + amd64_movsd_regp_reg (p, AMD64_RSI, AMD64_XMM0); + break; + case MONO_TYPE_I8: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS) + { + amd64_mov_membase_reg (p, AMD64_RSI, offset1, AMD64_RAX, size1); + if (regs_used > 1) + amd64_mov_membase_reg (p, AMD64_RSI, offset2, AMD64_RDX, size2); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS) + { + if (size1 == 4) + amd64_movss_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); + else + amd64_movsd_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); + else + amd64_movsd_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); + } + break; + } + + /* Else result should have been stored in place already. */ + break; + } + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + + /* + * Standard epilog. + */ + amd64_leave (p); + amd64_ret (p); + + g_assert (p - code_buffer < code_size); + res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer); + + g_hash_table_insert (cache, sig, res); + + return res; +} + +/* + * Returns a pointer to a native function that can be used to + * call the specified method. + * The function created will receive the arguments according + * to the call convention specified in the method. + * This function works by creating a MonoInvocation structure, + * filling the fields in and calling ves_exec_method on it. + * Still need to figure out how to handle the exception stuff + * across the managed/unmanaged boundary. + */ +void * +mono_arch_create_method_pointer (MonoMethod *method) +{ + MonoMethodSignature *sig; + MonoJitInfo *ji; + unsigned char *p, *code_buffer; + guint32 simpletype; + gint32 local_size; + gint32 stackval_pos; + gint32 mono_invocation_pos; + int i, cpos; + int *vtbuf; + int *rbpoffsets; + int int_arg_regs_used = 0; + int float_arg_regs_used = 0; + int stacked_args_size = 0; /* bytes of register passed arguments pushed on stack for safe keeping. Used to get alignment right. */ + int next_stack_arg_rbp_offset = 16; + int retval_ptr_rbp_offset = 0; + int this_reg = -1; /* Remember register this ptr is in. */ + + /* + * If it is a static P/Invoke method, we can just return the pointer + * to the method implementation. 
+ */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + + sig = method->signature; + + code_buffer = p = alloca (512); /* FIXME: check for overflows... */ + vtbuf = alloca (sizeof(int)*sig->param_count); + rbpoffsets = alloca (sizeof(int)*sig->param_count); + + + /* + * Standard function prolog. + */ + amd64_push_reg (p, AMD64_RBP); + amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); + + /* If there is an implicit return value pointer in the first args reg, save it now so + * the result can be stored through the pointer at the end. + */ + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) + { + amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); + int_arg_regs_used++; + stacked_args_size += 8; + retval_ptr_rbp_offset = -stacked_args_size; + } + + /* + * If there is a this pointer, remember the number of the register it is in. + */ + if (sig->hasthis) { + this_reg = int_arg_regs[int_arg_regs_used++]; + } + + /* Put all arguments passed in registers on the stack. + * Record offsets from RBP to each argument. + */ + cpos = 0; + + for (i = 0; i < sig->param_count; i++) { + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + if (int_arg_regs_used < MAX_INT_ARG_REGS) { + amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); + int_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + case MONO_TYPE_VALUETYPE: { + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + else + { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS && + (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) + { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); + stacked_args_size += size; + rbpoffsets[i] = stacked_args_size; + + amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset1, size1); + int_arg_regs_used++; + if (regs_used > 1) + { + amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset2, size2); + int_arg_regs_used++; + } + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) + { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); + stacked_args_size += size; + rbpoffsets[i] = stacked_args_size; + + if (size1 == 4) + amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); + else + amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); + float_arg_regs_used++; + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); + 
else + amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); + float_arg_regs_used++; + } + break; + } + + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += size; + } + break; + } + case MONO_TYPE_R4: + if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); + amd64_movss_regp_reg (p, AMD64_RSP, float_arg_regs_used); + float_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + case MONO_TYPE_R8: + stacked_args_size += 8; + if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); + amd64_movsd_regp_reg (p, AMD64_RSP, float_arg_regs_used); + float_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1) + stacked_args_size; + + local_size += 15; + local_size &= ~15; + + stackval_pos = -local_size; + mono_invocation_pos = stackval_pos + sizeof (stackval) * (sig->param_count + 1); + + /* stacked_args_size has already been pushed onto the stack. Make room for the rest of it. */ + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, local_size - stacked_args_size); + + /* Be careful not to trash any arg regs before saving this_reg to MonoInvocation structure below. */ + + /* + * Initialize MonoInvocation fields, first the ones known now. + */ + amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, child)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, 8); + /* + * Set the method pointer. + */ + amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, 8); + + /* + * Handle this. + */ + if (sig->hasthis) + amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, 8); + + /* + * Handle the arguments. stackval_pos is the offset from RBP of the stackval in the MonoInvocation args array . + * arg_pos is the offset from RBP to the incoming arg on the stack. + * We just call stackval_from_data to handle all the (nasty) issues.... 
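+ * Per argument, the emitted code amounts to (a sketch; "stackvals" names the
+ * array at stackval_pos):
+ *	stackval_from_data (sig->params [i], &stackvals [i], arg_i, sig->pinvoke);
+ * with the type and pinvoke flag baked in as immediates and both addresses
+ * formed with lea off RBP.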
+ */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, 8); + for (i = 0; i < sig->param_count; ++i) { +/* Need to call stackval_from_data (MonoType *type, stackval *result, char *data, gboolean pinvoke); */ + amd64_mov_reg_imm (p, AMD64_R11, stackval_from_data); + amd64_mov_reg_imm (p, int_arg_regs[0], sig->params[i]); + amd64_lea_membase (p, int_arg_regs[1], AMD64_RBP, stackval_pos); + amd64_lea_membase (p, int_arg_regs[2], AMD64_RBP, rbpoffsets[i]); + amd64_mov_reg_imm (p, int_arg_regs[3], sig->pinvoke); + amd64_call_reg (p, AMD64_R11); + stackval_pos += sizeof (stackval); +#if 0 + /* fixme: alignment */ + if (sig->pinvoke) + arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + arg_pos += mono_type_stack_size (sig->params [i], &align); +#endif + } + + /* + * Handle the return value storage area. + */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, 8); + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, 8); + amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, 8); + } + } + + /* + * Call the method. + */ + amd64_lea_membase (p, int_arg_regs[0], AMD64_RBP, mono_invocation_pos); + amd64_mov_reg_imm (p, AMD64_R11, ves_exec_method); + amd64_call_reg (p, AMD64_R11); + + /* + * Move the return value to the proper place. + */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + if (sig->ret->byref) { + amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); + } else { + int simpletype = sig->ret->type; + enum_retvalue: + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 1); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 2); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 4); + break; + case MONO_TYPE_I8: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); + break; + case MONO_TYPE_R4: + amd64_movss_regp_reg (p, AMD64_RAX, AMD64_XMM0); + break; + case MONO_TYPE_R8: + amd64_movsd_regp_reg (p, AMD64_RAX, AMD64_XMM0); + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS) + { + if (regs_used > 1) + amd64_mov_membase_reg (p, AMD64_RAX, offset2, AMD64_RDX, size2); + amd64_mov_membase_reg (p, AMD64_RAX, offset1, AMD64_RAX, size1); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS) + { + if (size1 == 4) + amd64_movss_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); + else + amd64_movsd_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_membase_reg (p, 
AMD64_RAX, offset2, AMD64_XMM1); + else + amd64_movsd_membase_reg (p, AMD64_RAX, offset2, AMD64_XMM1); + } + break; + } + + /* Else result should have been stored in place already. IA32 code has a stackval_to_data call here, which + * looks wrong to me as the pointer in the stack val being converted is setup to point to the output area anyway. + * It all looks a bit suspect anyway. + */ + break; + } + default: + g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); + break; + } + } + + /* + * Standard epilog. + */ + amd64_leave (p); + amd64_ret (p); + + g_assert (p - code_buffer < 512); + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = g_memdup (code_buffer, p - code_buffer); + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; +} -- cgit v1.1 From c58af24e593b96f1ccc7819ab100063aa4db3c54 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Fri, 27 Feb 2004 17:03:17 +0000 Subject: Add x86-64 directory svn path=/trunk/mono/; revision=23539 --- amd64/x86-64-codegen.h | 409 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 amd64/x86-64-codegen.h diff --git a/amd64/x86-64-codegen.h b/amd64/x86-64-codegen.h new file mode 100644 index 0000000..68bcfec --- /dev/null +++ b/amd64/x86-64-codegen.h @@ -0,0 +1,409 @@ +/* + * amd64-codegen.h: Macros for generating x86 code + * + * Authors: + * Paolo Molaro (lupus@ximian.com) + * Intel Corporation (ORP Project) + * Sergey Chaban (serge@wildwestsoftware.com) + * Dietmar Maurer (dietmar@ximian.com) + * Patrik Torstensson + * Zalman Stern + * + * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal. + * + * Copyright (C) 2000 Intel Corporation. All rights reserved. + * Copyright (C) 2001, 2002 Ximian, Inc. + */ + +#ifndef AMD64_H +#define AMD64_H + +typedef enum { + AMD64_RAX = 0, + AMD64_RCX = 1, + AMD64_RDX = 2, + AMD64_RBX = 3, + AMD64_RSP = 4, + AMD64_RBP = 5, + AMD64_RSI = 6, + AMD64_RDI = 7, + AMD64_R8 = 8, + AMD64_R9 = 9, + AMD64_R10 = 10, + AMD64_R11 = 11, + AMD64_R12 = 12, + AMD64_R13 = 13, + AMD64R_14 = 14, + AMD64_R15 = 15, + AMD64_NREG +} AMD64_Reg_No; + +typedef enum { + AMD64_XMM0 = 0, + AMD64_XMM1 = 1, + AMD64_XMM2 = 2, + AMD64_XMM3 = 3, + AMD64_XMM4 = 4, + AMD64_XMM5 = 5, + AMD64_XMM6 = 6, + AMD64_XMM8 = 8, + AMD64_XMM9 = 9, + AMD64_XMM10 = 10, + AMD64_XMM11 = 11, + AMD64_XMM12 = 12, + AMD64_XMM13 = 13, + AMD64_XMM14 = 14, + AMD64_XMM15 = 15, + AMD64_XMM_NREG = 16, +} AMD64_XMM_Reg_No; + +typedef enum +{ + AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ + AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ + AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ + AMD64_REX_W = 8 /* Opeartion is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ +} AMD64_REX_Bits; + +#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) +#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \ + { \ + unsigned char _amd64_rex_bits = \ + (((width) > 4) ? AMD64_REX_W : 0) | \ + (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ + (((reg_index) > 7) ? AMD64_REX_X : 0) | \ + (((reg_rm_base_opcode) > 7) ? 
AMD64_REX_B : 0); \ + if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + } + +typedef union { + long val; + unsigned char b [8]; +} amd64_imm_buf; + +#include "../x86/x86-codegen.h" + + +/* Need to fill this info in for amd64. */ + +#if 0 +/* +// bitvector mask for callee-saved registers +*/ +#define X86_ESI_MASK (1< Date: Fri, 27 Feb 2004 17:03:30 +0000 Subject: Remove amd64 svn path=/trunk/mono/; revision=23540 --- amd64/Makefile.am | 7 - amd64/amd64-codegen.h | 409 ------------------- amd64/tramp.c | 1055 ------------------------------------------------- 3 files changed, 1471 deletions(-) delete mode 100644 amd64/Makefile.am delete mode 100644 amd64/amd64-codegen.h delete mode 100644 amd64/tramp.c diff --git a/amd64/Makefile.am b/amd64/Makefile.am deleted file mode 100644 index 54499b5..0000000 --- a/amd64/Makefile.am +++ /dev/null @@ -1,7 +0,0 @@ - -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-amd64.la - -libmonoarch_amd64_la_SOURCES = tramp.c amd64-codegen.h - diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h deleted file mode 100644 index 68bcfec..0000000 --- a/amd64/amd64-codegen.h +++ /dev/null @@ -1,409 +0,0 @@ -/* - * amd64-codegen.h: Macros for generating x86 code - * - * Authors: - * Paolo Molaro (lupus@ximian.com) - * Intel Corporation (ORP Project) - * Sergey Chaban (serge@wildwestsoftware.com) - * Dietmar Maurer (dietmar@ximian.com) - * Patrik Torstensson - * Zalman Stern - * - * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal. - * - * Copyright (C) 2000 Intel Corporation. All rights reserved. - * Copyright (C) 2001, 2002 Ximian, Inc. - */ - -#ifndef AMD64_H -#define AMD64_H - -typedef enum { - AMD64_RAX = 0, - AMD64_RCX = 1, - AMD64_RDX = 2, - AMD64_RBX = 3, - AMD64_RSP = 4, - AMD64_RBP = 5, - AMD64_RSI = 6, - AMD64_RDI = 7, - AMD64_R8 = 8, - AMD64_R9 = 9, - AMD64_R10 = 10, - AMD64_R11 = 11, - AMD64_R12 = 12, - AMD64_R13 = 13, - AMD64R_14 = 14, - AMD64_R15 = 15, - AMD64_NREG -} AMD64_Reg_No; - -typedef enum { - AMD64_XMM0 = 0, - AMD64_XMM1 = 1, - AMD64_XMM2 = 2, - AMD64_XMM3 = 3, - AMD64_XMM4 = 4, - AMD64_XMM5 = 5, - AMD64_XMM6 = 6, - AMD64_XMM8 = 8, - AMD64_XMM9 = 9, - AMD64_XMM10 = 10, - AMD64_XMM11 = 11, - AMD64_XMM12 = 12, - AMD64_XMM13 = 13, - AMD64_XMM14 = 14, - AMD64_XMM15 = 15, - AMD64_XMM_NREG = 16, -} AMD64_XMM_Reg_No; - -typedef enum -{ - AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ - AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ - AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ - AMD64_REX_W = 8 /* Opeartion is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ -} AMD64_REX_Bits; - -#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) -#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \ - { \ - unsigned char _amd64_rex_bits = \ - (((width) > 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } - -typedef union { - long val; - unsigned char b [8]; -} amd64_imm_buf; - -#include "../x86/x86-codegen.h" - - -/* Need to fill this info in for amd64. 
*/ - -#if 0 -/* -// bitvector mask for callee-saved registers -*/ -#define X86_ESI_MASK (1< -#include -#include "amd64-codegen.h" -#include "mono/metadata/class.h" -#include "mono/metadata/tabledefs.h" -#include "mono/interpreter/interp.h" -#include "mono/metadata/appdomain.h" -#include "mono/metadata/marshal.h" - -/* - * The resulting function takes the form: - * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); - */ -#define FUNC_ADDR_POS 8 -#define RETVAL_POS 12 -#define THIS_POS 16 -#define ARGP_POS 20 -#define LOC_POS -4 - -#define ARG_SIZE sizeof (stackval) - -#define MAX_INT_ARG_REGS 6 -#define MAX_FLOAT_ARG_REGS 8 - -// TODO get these right. They are upper bounds anyway, so it doesn't much matter. -#define PUSH_INT_STACK_ARG_SIZE 16 -#define MOVE_INT_REG_ARG_SIZE 16 -#define PUSH_FLOAT_STACK_ARG_SIZE 16 -#define MOVE_FLOAT_REG_ARG_SIZE 16 -#define COPY_STRUCT_STACK_ARG_SIZE 16 - -/* Maps an argument number (starting at 0) to the register it is passed in (if it fits). - * E.g. int foo(int bar, int quux) has the foo arg in RDI and the quux arg in RSI - * There is no such map for floating point args as they go in XMM0-XMM7 in order and thus the - * index is the register number. - */ -static int int_arg_regs[] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 }; - -/* This next block of code resolves the ABI rules for passing structures in the argument registers. - * These basically amount to "Use up to two registers if they are all integer or all floating point. - * If the structure is bigger than two registers or would be in one integer register and one floating point, - * it is passed in memory instead. - * - * It is possible this code needs to be recursive to be correct in the case when one of the structure members - * is itself a structure. - * - * The 80-bit floating point stuff is ignored. 
- */ -typedef enum { - ARG_IN_MEMORY, - ARG_IN_INT_REGS, - ARG_IN_FLOAT_REGS -} struct_arg_type; - -static struct_arg_type compute_arg_type(MonoType *type) -{ - guint32 simpletype = type->type; - - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - return ARG_IN_INT_REGS; - break; - case MONO_TYPE_VALUETYPE: { - if (type->data.klass->enumtype) - return ARG_IN_INT_REGS; - return ARG_IN_MEMORY; - break; - } - case MONO_TYPE_R4: - case MONO_TYPE_R8: - return ARG_IN_FLOAT_REGS; - break; - default: - g_error ("Can't trampoline 0x%x", type->type); - } - - return ARG_IN_MEMORY; -} - -static struct_arg_type value_type_info(MonoClass *klass, int *native_size, int *regs_used, int *offset1, int *size1, int *offset2, int *size2) -{ - MonoMarshalType *info = mono_marshal_load_type_info (klass); - - *native_size = info->native_size; - - if (info->native_size > 8 || info->num_fields > 2) - { - *regs_used = 0; - *offset1 = -1; - *offset2 = -1; - return ARG_IN_MEMORY; - } - - if (info->num_fields == 1) - { - struct_arg_type result = compute_arg_type(info->fields[0].field->type); - if (result != ARG_IN_MEMORY) - { - *regs_used = 1; - *offset1 = info->fields[0].offset; - *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); - } - else - { - *regs_used = 0; - *offset1 = -1; - } - - *offset2 = -1; - return result; - } - - struct_arg_type result1 = compute_arg_type(info->fields[0].field->type); - struct_arg_type result2 = compute_arg_type(info->fields[0].field->type); - - if (result1 == result2 && result1 != ARG_IN_MEMORY) - { - *regs_used = 2; - *offset1 = info->fields[0].offset; - *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); - *offset2 = info->fields[1].offset; - *size2 = mono_marshal_type_size (info->fields[1].field->type, info->fields[1].mspec, NULL, 1, 1); - return result1; - } - - return ARG_IN_MEMORY; -} - -MonoPIFunc -mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -{ - unsigned char *p, *code_buffer; - guint32 stack_size = 0, code_size = 50; - guint32 arg_pos, simpletype; - int i; - static GHashTable *cache = NULL; - MonoPIFunc res; - - guint32 int_arg_regs_used = 0; - guint32 float_arg_regs_used = 0; - guint32 next_int_arg_reg = 0; - guint32 next_float_arg_reg = 0; - /* Indicates that the return value is filled in inside the called function. */ - int retval_implicit = 0; - char *arg_in_reg_bitvector; /* A set index by argument number saying if it is in a register - (integer or floating point according to type) */ - - if (!cache) - cache = g_hash_table_new ((GHashFunc)mono_signature_hash, - (GCompareFunc)mono_metadata_signature_equal); - - if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig))) - return res; - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) { - int_arg_regs_used++; - code_size += MOVE_INT_REG_ARG_SIZE; - } - - if (sig->hasthis) { - int_arg_regs_used++; - code_size += MOVE_INT_REG_ARG_SIZE; - } - - /* Run through stuff to calculate code size and argument bytes that will be pushed on stack (stack_size). 
*/ - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i]->type; -enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - if (int_arg_regs_used++ > MAX_INT_ARG_REGS) { - stack_size += 8; - code_size += PUSH_INT_STACK_ARG_SIZE; - } - else - code_size += MOVE_INT_REG_ARG_SIZE; - break; - case MONO_TYPE_VALUETYPE: { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - if (arg_type == ARG_IN_INT_REGS && - (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) - { - code_size += MOVE_INT_REG_ARG_SIZE; - int_arg_regs_used += regs_used; - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS && - (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) - { - code_size += MOVE_FLOAT_REG_ARG_SIZE; - float_arg_regs_used += regs_used; - break; - } - - /* Else item is in memory. */ - - stack_size += size + 7; - stack_size &= ~7; - code_size += COPY_STRUCT_STACK_ARG_SIZE; - - break; - } - case MONO_TYPE_R4: - case MONO_TYPE_R8: - if (float_arg_regs_used++ > MAX_FLOAT_ARG_REGS) { - stack_size += 8; - code_size += PUSH_FLOAT_STACK_ARG_SIZE; - } - else - code_size += MOVE_FLOAT_REG_ARG_SIZE; - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - /* - * FIXME: take into account large return values. - * (Comment carried over from IA32 code. Not sure what it means :-) - */ - - code_buffer = p = alloca (code_size); - - /* - * Standard function prolog. - */ - amd64_push_reg (p, AMD64_RBP); - amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); - /* - * and align to 16 byte boundary... - */ - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - retval_implicit = 1; - } - } - - if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { - /* Push the retval register so it is saved across the call. It will be addressed via RBP later. */ - amd64_push_reg (p, AMD64_RSI); - stack_size += 8; - } - - /* Ensure stack is 16 byte aligned when entering called function as required by calling convention. - * Getting this wrong results in a general protection fault on an SSE load or store somewhere in the - * code called under the trampoline. - */ - if ((stack_size & 15) != 0) - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 16 - (stack_size & 15)); - - /* - * On entry to generated function: - * RDI has target function address - * RSI has return value location address - * RDX has this pointer address - * RCX has the pointer to the args array. - * - * Inside the stub function: - * R10 holds the pointer to the args - * R11 holds the target function address. - * The return value address is pushed on the stack. - * The this pointer is moved into the first arg register at the start. 
- * - * Optimization note: we could keep the args pointer in RCX and then - * load over itself at the end. Ditto the callee addres could be left in RDI in some cases. - */ - - /* Move args pointer to temp register. */ - amd64_mov_reg_reg (p, AMD64_R10, AMD64_RCX, 8); - amd64_mov_reg_reg (p, AMD64_R11, AMD64_RDI, 8); - - /* First args register gets return value pointer, if need be. - * Note that "byref" equal true means the called function returns a pointer. - */ - if (retval_implicit) { - amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RSI, 8); - next_int_arg_reg++; - } - - /* this pointer goes in next args register. */ - if (sig->hasthis) { - amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RDX, 8); - next_int_arg_reg++; - } - - /* - * Generate code to handle arguments in registers. Stack arguments will happen in a loop after this. - */ - arg_in_reg_bitvector = (char *)alloca((sig->param_count + 7) / 8); - memset(arg_in_reg_bitvector, 0, (sig->param_count + 7) / 8); - - /* First, load all the arguments that are passed in registers into the appropriate registers. - * Below there is another loop to handle arguments passed on the stack. - */ - for (i = 0; i < sig->param_count; i++) { - arg_pos = ARG_SIZE * i; - - if (sig->params [i]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i]->type; -enum_marshal: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_CLASS: - if (next_int_arg_reg < MAX_INT_ARG_REGS) { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos, 8); - next_int_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_R4: - if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { - amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); - next_float_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_R8: - if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); - next_float_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_VALUETYPE: { - if (!sig->params [i]->data.klass->enumtype) { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - - if (arg_type == ARG_IN_INT_REGS && - (next_int_arg_reg + regs_used) <= MAX_INT_ARG_REGS) - { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset1, size1); - next_int_arg_reg++; - if (regs_used > 1) - { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset2, size2); - next_int_arg_reg++; - } - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS && - (next_float_arg_reg + regs_used) <= MAX_FLOAT_ARG_REGS) - { - if (size1 == 4) - amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); - else - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); - next_float_arg_reg++; - - if (regs_used > 1) - { - if (size2 == 4) - amd64_movss_reg_membase (p, 
next_float_arg_reg, AMD64_R10, arg_pos + offset2); - else - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset2); - next_float_arg_reg++; - } - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - break; - } - - /* Structs in memory are handled in the next loop. */ - } else { - /* it's an enum value */ - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_marshal; - } - break; - } - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - /* Handle stack arguments, pushing the rightmost argument first. */ - for (i = sig->param_count; i > 0; --i) { - arg_pos = ARG_SIZE * (i - 1); - if (sig->params [i - 1]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i - 1]->type; -enum_marshal2: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_CLASS: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_R4: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_R8: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_VALUETYPE: - if (!sig->params [i - 1]->data.klass->enumtype) { - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) - { - int ss = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); - ss += 7; - ss &= ~7; - - amd64_alu_reg_imm(p, X86_SUB, AMD64_RSP, ss); - /* Count register */ - amd64_mov_reg_imm(p, AMD64_RCX, ss); - /* Source register */ - amd64_lea_membase(p, AMD64_RSI, AMD64_R10, arg_pos); - /* Dest register */ - amd64_mov_reg_reg(p, AMD64_RDI, AMD64_RSP, 8); - - /* AMD64 calling convention guarantees direction flag is clear at call boundary. */ - x86_prefix(p, AMD64_REX(AMD64_REX_W)); - x86_prefix(p, X86_REP_PREFIX); - x86_movsb(p); - } - } else { - /* it's an enum value */ - simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; - goto enum_marshal2; - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); - } - } - - /* TODO: Set RAL to number of XMM registers used in case this is a varags function? */ - - /* - * Insert call to function - */ - amd64_call_reg (p, AMD64_R11); - - if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { - amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, 8); - } - /* - * Handle retval. - * Small integer and pointer values are in EAX. - * Long integers are in EAX:EDX. - * FP values are on the FP stack. 
- */ - - if (sig->ret->byref || string_ctor) { - simpletype = MONO_TYPE_PTR; - } else { - simpletype = sig->ret->type; - } - enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 1); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 2); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - case MONO_TYPE_PTR: - amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); - break; - case MONO_TYPE_R4: - amd64_movss_regp_reg (p, AMD64_RSI, AMD64_XMM0); - break; - case MONO_TYPE_R8: - amd64_movsd_regp_reg (p, AMD64_RSI, AMD64_XMM0); - break; - case MONO_TYPE_I8: - amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); - break; - case MONO_TYPE_VALUETYPE: { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - - if (arg_type == ARG_IN_INT_REGS) - { - amd64_mov_membase_reg (p, AMD64_RSI, offset1, AMD64_RAX, size1); - if (regs_used > 1) - amd64_mov_membase_reg (p, AMD64_RSI, offset2, AMD64_RDX, size2); - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS) - { - if (size1 == 4) - amd64_movss_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); - else - amd64_movsd_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); - - if (regs_used > 1) - { - if (size2 == 4) - amd64_movss_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); - else - amd64_movsd_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); - } - break; - } - - /* Else result should have been stored in place already. */ - break; - } - case MONO_TYPE_VOID: - break; - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); - } - - /* - * Standard epilog. - */ - amd64_leave (p); - amd64_ret (p); - - g_assert (p - code_buffer < code_size); - res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer); - - g_hash_table_insert (cache, sig, res); - - return res; -} - -/* - * Returns a pointer to a native function that can be used to - * call the specified method. - * The function created will receive the arguments according - * to the call convention specified in the method. - * This function works by creating a MonoInvocation structure, - * filling the fields in and calling ves_exec_method on it. - * Still need to figure out how to handle the exception stuff - * across the managed/unmanaged boundary. - */ -void * -mono_arch_create_method_pointer (MonoMethod *method) -{ - MonoMethodSignature *sig; - MonoJitInfo *ji; - unsigned char *p, *code_buffer; - guint32 simpletype; - gint32 local_size; - gint32 stackval_pos; - gint32 mono_invocation_pos; - int i, cpos; - int *vtbuf; - int *rbpoffsets; - int int_arg_regs_used = 0; - int float_arg_regs_used = 0; - int stacked_args_size = 0; /* bytes of register passed arguments pushed on stack for safe keeping. Used to get alignment right. */ - int next_stack_arg_rbp_offset = 16; - int retval_ptr_rbp_offset = 0; - int this_reg = -1; /* Remember register this ptr is in. */ - - /* - * If it is a static P/Invoke method, we can just return the pointer - * to the method implementation. 
- */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 1; - ji->code_start = method->addr; - - mono_jit_info_table_add (mono_root_domain, ji); - return method->addr; - } - - sig = method->signature; - - code_buffer = p = alloca (512); /* FIXME: check for overflows... */ - vtbuf = alloca (sizeof(int)*sig->param_count); - rbpoffsets = alloca (sizeof(int)*sig->param_count); - - - /* - * Standard function prolog. - */ - amd64_push_reg (p, AMD64_RBP); - amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); - - /* If there is an implicit return value pointer in the first args reg, save it now so - * the result can be stored through the pointer at the end. - */ - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) - { - amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); - int_arg_regs_used++; - stacked_args_size += 8; - retval_ptr_rbp_offset = -stacked_args_size; - } - - /* - * If there is a this pointer, remember the number of the register it is in. - */ - if (sig->hasthis) { - this_reg = int_arg_regs[int_arg_regs_used++]; - } - - /* Put all arguments passed in registers on the stack. - * Record offsets from RBP to each argument. - */ - cpos = 0; - - for (i = 0; i < sig->param_count; i++) { - if (sig->params [i]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i]->type; -enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - if (int_arg_regs_used < MAX_INT_ARG_REGS) { - amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); - int_arg_regs_used++; - stacked_args_size += 8; - rbpoffsets[i] = -stacked_args_size; - } - else - { - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += 8; - } - break; - case MONO_TYPE_VALUETYPE: { - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - else - { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - - if (arg_type == ARG_IN_INT_REGS && - (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) - { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); - stacked_args_size += size; - rbpoffsets[i] = stacked_args_size; - - amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset1, size1); - int_arg_regs_used++; - if (regs_used > 1) - { - amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset2, size2); - int_arg_regs_used++; - } - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS && - (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) - { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); - stacked_args_size += size; - rbpoffsets[i] = stacked_args_size; - - if (size1 == 4) - amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); - else - amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); - float_arg_regs_used++; - - if (regs_used > 1) - { - if (size2 == 4) - amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); - 
else - amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); - float_arg_regs_used++; - } - break; - } - - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += size; - } - break; - } - case MONO_TYPE_R4: - if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); - amd64_movss_regp_reg (p, AMD64_RSP, float_arg_regs_used); - float_arg_regs_used++; - stacked_args_size += 8; - rbpoffsets[i] = -stacked_args_size; - } - else - { - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += 8; - } - break; - case MONO_TYPE_R8: - stacked_args_size += 8; - if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); - amd64_movsd_regp_reg (p, AMD64_RSP, float_arg_regs_used); - float_arg_regs_used++; - stacked_args_size += 8; - rbpoffsets[i] = -stacked_args_size; - } - else - { - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += 8; - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1) + stacked_args_size; - - local_size += 15; - local_size &= ~15; - - stackval_pos = -local_size; - mono_invocation_pos = stackval_pos + sizeof (stackval) * (sig->param_count + 1); - - /* stacked_args_size has already been pushed onto the stack. Make room for the rest of it. */ - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, local_size - stacked_args_size); - - /* Be careful not to trash any arg regs before saving this_reg to MonoInvocation structure below. */ - - /* - * Initialize MonoInvocation fields, first the ones known now. - */ - amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, child)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, 8); - /* - * Set the method pointer. - */ - amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, 8); - - /* - * Handle this. - */ - if (sig->hasthis) - amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, 8); - - /* - * Handle the arguments. stackval_pos is the offset from RBP of the stackval in the MonoInvocation args array . - * arg_pos is the offset from RBP to the incoming arg on the stack. - * We just call stackval_from_data to handle all the (nasty) issues.... 
- */ - amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, 8); - for (i = 0; i < sig->param_count; ++i) { -/* Need to call stackval_from_data (MonoType *type, stackval *result, char *data, gboolean pinvoke); */ - amd64_mov_reg_imm (p, AMD64_R11, stackval_from_data); - amd64_mov_reg_imm (p, int_arg_regs[0], sig->params[i]); - amd64_lea_membase (p, int_arg_regs[1], AMD64_RBP, stackval_pos); - amd64_lea_membase (p, int_arg_regs[2], AMD64_RBP, rbpoffsets[i]); - amd64_mov_reg_imm (p, int_arg_regs[3], sig->pinvoke); - amd64_call_reg (p, AMD64_R11); - stackval_pos += sizeof (stackval); -#if 0 - /* fixme: alignment */ - if (sig->pinvoke) - arg_pos += mono_type_native_stack_size (sig->params [i], &align); - else - arg_pos += mono_type_stack_size (sig->params [i], &align); -#endif - } - - /* - * Handle the return value storage area. - */ - amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, 8); - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, 8); - amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, 8); - } - } - - /* - * Call the method. - */ - amd64_lea_membase (p, int_arg_regs[0], AMD64_RBP, mono_invocation_pos); - amd64_mov_reg_imm (p, AMD64_R11, ves_exec_method); - amd64_call_reg (p, AMD64_R11); - - /* - * Move the return value to the proper place. - */ - amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); - if (sig->ret->byref) { - amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); - } else { - int simpletype = sig->ret->type; - enum_retvalue: - switch (sig->ret->type) { - case MONO_TYPE_VOID: - break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 1); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 2); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_CLASS: - amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 4); - break; - case MONO_TYPE_I8: - amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); - break; - case MONO_TYPE_R4: - amd64_movss_regp_reg (p, AMD64_RAX, AMD64_XMM0); - break; - case MONO_TYPE_R8: - amd64_movsd_regp_reg (p, AMD64_RAX, AMD64_XMM0); - break; - case MONO_TYPE_VALUETYPE: { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - - if (arg_type == ARG_IN_INT_REGS) - { - if (regs_used > 1) - amd64_mov_membase_reg (p, AMD64_RAX, offset2, AMD64_RDX, size2); - amd64_mov_membase_reg (p, AMD64_RAX, offset1, AMD64_RAX, size1); - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS) - { - if (size1 == 4) - amd64_movss_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); - else - amd64_movsd_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); - - if (regs_used > 1) - { - if (size2 == 4) - amd64_movss_membase_reg (p, 
AMD64_RAX, offset2, AMD64_XMM1); - else - amd64_movsd_membase_reg (p, AMD64_RAX, offset2, AMD64_XMM1); - } - break; - } - - /* Else result should have been stored in place already. IA32 code has a stackval_to_data call here, which - * looks wrong to me as the pointer in the stack val being converted is setup to point to the output area anyway. - * It all looks a bit suspect anyway. - */ - break; - } - default: - g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); - break; - } - } - - /* - * Standard epilog. - */ - amd64_leave (p); - amd64_ret (p); - - g_assert (p - code_buffer < 512); - - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = p - code_buffer; - ji->code_start = g_memdup (code_buffer, p - code_buffer); - - mono_jit_info_table_add (mono_root_domain, ji); - - return ji->code_start; -} -- cgit v1.1 From 94156ea640c77f37c64332acd21adf4170ecb67b Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Sat, 28 Feb 2004 15:53:18 +0000 Subject: Add svn path=/trunk/mono/; revision=23562 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index acf603d..986cced 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa +DIST_SUBDIRS = x86 x86-64 ppc sparc arm s390 alpha hppa INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From 7e46377b331225994068d848d9ff8ceaeb96d38a Mon Sep 17 00:00:00 2001 From: Duncan Mak Date: Mon, 8 Mar 2004 01:47:03 +0000 Subject: 2004-03-07 Duncan Mak * Makefile.am: Removed the reference to 'x86-64'. This was the cause of the missing Mono daily tarballs, 'make dist' wasn't working. We do have an 'amd64' directory, but it doesn't make it in 'make dist'. svn path=/trunk/mono/; revision=23784 --- ChangeLog | 8 ++++++++ Makefile.am | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index b6fd091..c3181e2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2004-03-07 Duncan Mak + + * Makefile.am: Removed the reference to 'x86-64'. This was the cause + of the missing Mono daily tarballs, 'make dist' wasn't working. + + We do have an 'amd64' directory, but it doesn't make it in 'make + dist'. + 2004-02-19 Zoltan Varga * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones. diff --git a/Makefile.am b/Makefile.am index 986cced..acf603d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 x86-64 ppc sparc arm s390 alpha hppa +DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From 36d64a0bbf92ca51335ddcb87627a8194f601820 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 11 Mar 2004 18:23:26 +0000 Subject: 2004-03-11 Zoltan Varga * sparc/sparc-codegen.h: Ongoing sparc work. svn path=/trunk/mono/; revision=23926 --- ChangeLog | 4 ++++ sparc/sparc-codegen.h | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index c3181e2..04317f2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-03-11 Zoltan Varga + + * sparc/sparc-codegen.h: Ongoing sparc work. + 2004-03-07 Duncan Mak * Makefile.am: Removed the reference to 'x86-64'. 
This was the cause diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index e965d25..76e0759 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -277,8 +277,12 @@ typedef struct { /* disassembly */ #define sparc_inst_op(inst) ((inst) >> 30) +#define sparc_inst_rd(inst) (((inst) >> 25) & 0x1f) #define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f) +#define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f) +#define sparc_inst_rs2(inst) (((inst) >> 0) & 0x1f) #define sparc_inst_imm(inst) (((inst) >> 13) & 0x1) +#define sparc_inst_imm13(inst) (((inst) >> 0) & 0x1fff) #define sparc_encode_call(ins,addr) \ do { \ @@ -464,6 +468,8 @@ typedef struct { #define sparc_flush(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),59,0) #define sparc_flush_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),59,0) +#define sparc_flushw(ins) sparc_encode_format3a((ins),2,0,0,0,43,0) + /* trap */ #define sparc_ta(ins,tt) sparc_encode_format3b((ins),2,0,(tt),58,0x8) -- cgit v1.1 From 38dd3d4c585c7e9cc116b7dfb5e89356c4d02da2 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 15 Mar 2004 17:28:56 +0000 Subject: 2004-03-15 Zoltan Varga * sparc/sparc-codegen.h: Add some v9 instructions. svn path=/trunk/mono/; revision=24050 --- ChangeLog | 4 ++ sparc/sparc-codegen.h | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 153 insertions(+) diff --git a/ChangeLog b/ChangeLog index 04317f2..e61ad04 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-03-15 Zoltan Varga + + * sparc/sparc-codegen.h: Add some v9 instructions. + 2004-03-11 Zoltan Varga * sparc/sparc-codegen.h: Ongoing sparc work. diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 76e0759..053a96f 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -158,6 +158,15 @@ typedef enum { } SparcFCond; typedef enum { + sparc_icc = 4, + sparc_xcc = 6, + sparc_fcc0 = 0, + sparc_fcc1 = 1, + sparc_fcc2 = 2, + sparc_fcc3 = 3 +} SparcCC; + +typedef enum { /* fop1 format */ sparc_fitos_val = 196, sparc_fitod_val = 200, @@ -221,6 +230,18 @@ typedef struct { } sparc_format2b; typedef struct { + unsigned int op : 2; /* always 0 */ + unsigned int a : 1; + unsigned int res : 1; + unsigned int rcond: 3; + unsigned int op2 : 3; + unsigned int d16hi: 2; + unsigned int p : 1; + unsigned int rs1 : 5; + unsigned int d16lo: 14; +} sparc_format2c; + +typedef struct { unsigned int op : 2; /* 2 or 3 */ unsigned int rd : 5; unsigned int op3 : 6; @@ -269,11 +290,56 @@ typedef struct { unsigned int rs2 : 5; } sparc_format3c; +typedef struct { + unsigned int op : 2; + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int i : 1; + unsigned int cc01 : 2; + unsigned int res : 6; + unsigned int rs2 : 5; +} sparc_format4a; + +typedef struct { + unsigned int op : 2; + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int rs1 : 5; + unsigned int i : 1; + unsigned int cc01 : 2; + unsigned int simm : 11; +} sparc_format4b; + +typedef struct { + unsigned int op : 2; + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int cc2 : 1; + unsigned int cond : 4; + unsigned int i : 1; + unsigned int cc01 : 2; + unsigned int res : 6; + unsigned int rs2 : 5; +} sparc_format4c; + +typedef struct { + unsigned int op : 2; + unsigned int rd : 5; + unsigned int op3 : 6; + unsigned int cc2 : 1; + unsigned int cond : 4; + unsigned int i : 1; + unsigned int cc01 : 2; + unsigned int simm : 11; +} sparc_format4d; + /* for use in logical ops, use 0 to not set flags */ #define sparc_cc 16 
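As a quick illustration of the disassembly helpers added in this hunk (sparc_inst_rd, sparc_inst_rs1, sparc_inst_rs2 and friends), the sketch below — an editor's addition, not part of the commit — decodes one hand-assembled instruction word. The word is the standard SPARC encoding of add %g1, %g2, %g3; the macros are duplicated here so the sketch stands alone.

#include <stdio.h>

#define sparc_inst_op(inst)  ((inst) >> 30)
#define sparc_inst_rd(inst)  (((inst) >> 25) & 0x1f)
#define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f)
#define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f)
#define sparc_inst_imm(inst) (((inst) >> 13) & 0x1)
#define sparc_inst_rs2(inst) (((inst) >> 0) & 0x1f)

int main (void)
{
	unsigned int word = 0x86004002; /* add %g1, %g2, %g3 */

	/* prints: op=2 rd=3 op3=0 rs1=1 imm=0 rs2=2 */
	printf ("op=%u rd=%u op3=%u rs1=%u imm=%u rs2=%u\n",
	        sparc_inst_op (word), sparc_inst_rd (word), sparc_inst_op3 (word),
	        sparc_inst_rs1 (word), sparc_inst_imm (word), sparc_inst_rs2 (word));
	return 0;
}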
#define sparc_is_imm13(val) ((gint)val >= (gint)-(1<<12) && (gint)val <= (gint)((1<<12)-1)) #define sparc_is_imm22(val) ((gint)val >= (gint)-(1<<21) && (gint)val <= (gint)((1<<21)-1)) +#define sparc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) /* disassembly */ #define sparc_inst_op(inst) ((inst) >> 30) @@ -313,6 +379,21 @@ typedef struct { (ins) = (unsigned int*)__f + 1; \ } while (0) +#define sparc_encode_format2c(ins,aval,bcond,oper,predict,r1,disp16) \ + do { \ + sparc_format2c *__f = (sparc_format2c*)(ins); \ + __f->op = 0; \ + __f->a = (aval); \ + __f->res = 0; \ + __f->rcond = (bcond); \ + __f->op2 = (oper); \ + __f->d16hi = ((disp16) >> 14); \ + __f->p = (predict); \ + __f->rs1 = (r1); \ + __f->d16lo = ((disp16) & 0x3fff); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + #define sparc_encode_format3a(ins,opval,asival,r1,r2,oper,dest) \ do { \ sparc_format3a *__f = (sparc_format3a*)(ins); \ @@ -377,6 +458,62 @@ typedef struct { (ins) = (unsigned int*)__f + 1; \ } while (0) +#define sparc_encode_format4a(ins,opval,oper,cc,r1,r2,dest) \ + do { \ + sparc_format4a *__f = (sparc_format4a*)(ins); \ + __f->op = (opval); \ + __f->rd = (dest); \ + __f->op3 = (oper); \ + __f->rs1 = (r1); \ + __f->i = 0; \ + __f->cc01= (cc) & 0x3; \ + __f->res = 0; \ + __f->rs2 = (r2); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format4b(ins,opval,oper,cc,r1,imm,dest) \ + do { \ + sparc_format4b *__f = (sparc_format4b*)(ins); \ + __f->op = (opval); \ + __f->rd = (dest); \ + __f->op3 = (oper); \ + __f->rs1 = (r1); \ + __f->i = 1; \ + __f->cc01= (cc) & 0x3; \ + __f->simm = (imm); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format4c(ins,opval,oper,cc,bcond,r2,dest) \ + do { \ + sparc_format4c *__f = (sparc_format4c*)(ins); \ + __f->op = (opval); \ + __f->rd = (dest); \ + __f->op3 = (oper); \ + __f->cc2 = ((xcc) >> 2) & 0x1; \ + __f->cond = bcond; \ + __f->i = 0; \ + __f->cc01= (xcc) & 0x3; \ + __f->res = 0; \ + __f->rs2 = (r2); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format4d(ins,opval,oper,xcc,bcond,imm,dest) \ + do { \ + sparc_format4d *__f = (sparc_format4d*)(ins); \ + __f->op = (opval); \ + __f->rd = (dest); \ + __f->op3 = (oper); \ + __f->cc2 = ((xcc) >> 2) & 0x1; \ + __f->cond = bcond; \ + __f->i = 1; \ + __f->cc01= (xcc) & 0x3; \ + __f->simm = (imm); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + /* is it useful to provide a non-default value? */ #define sparc_asi 0x0 @@ -616,6 +753,18 @@ typedef struct { /* FIXME: float condition codes are different: unify. 
*/ #define sparc_fbranch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),6,(displ)) +#define sparc_brz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x1,0x3,(predict),(rs1),(disp)) +#define sparc_brlez(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x2,0x3,(predict),(rs1),(disp)) +#define sparc_brlz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x3,0x3,(predict),(rs1),(disp)) +#define sparc_brnz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x5,0x3,(predict),(rs1),(disp)) +#define sparc_brgz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x6,0x3,(predict),(rs1),(disp)) +#define sparc_brgez(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x7,0x3,(predict),(rs1),(disp)) + +/* conditional moves */ +#define sparc_movcc(ins,cc,condval,r1,dest) sparc_encode_format4c((ins), 0x2, 0x2c, cc, condval, r1, dest) + +#define sparc_movcc_imm(ins,cc,condval,imm,dest) sparc_encode_format4d((ins), 0x2, 0x2c, cc, condval, imm, dest) + /* synthetic instructions */ #define sparc_cmp(ins,r1,r2) sparc_sub((ins),sparc_cc,(r1),(r2),sparc_g0) #define sparc_cmp_imm(ins,r1,imm) sparc_sub_imm((ins),sparc_cc,(r1),(imm),sparc_g0) -- cgit v1.1 From 25f79c5f1b26de4e7a413128d37731e1fcf09f14 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Tue, 16 Mar 2004 00:02:55 +0000 Subject: 2004-03-15 Bernie Solomon * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg so Sun's dis command recognizes it. svn path=/trunk/mono/; revision=24084 --- ChangeLog | 5 +++++ sparc/sparc-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index e61ad04..f46096b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2004-03-15 Bernie Solomon + + * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg + so Sun's dis command recognizes it. + 2004-03-15 Zoltan Varga * sparc/sparc-codegen.h: Add some v9 instructions. 
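A usage sketch for the v9 register branches added in the commit above (editor's addition; `buf` is illustrative and sparc_g1 is assumed from the header's register enum). Displacements for these branches count instruction words, not bytes. One caveat visible in the hunk itself: sparc_encode_format4c declares its condition-code parameter as `cc` but its body reads `(xcc)`, so sparc_movcc as committed only expands cleanly when the caller's argument expression is literally spelled `xcc`.

unsigned int buf [4];
unsigned int *code = buf;

/* brnz,pt %g1, .+32  --  annul=0, predict=1, displacement 8 words */
sparc_brnz (code, 0, 1, sparc_g1, 8);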
diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 053a96f..b91cf5b 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -813,7 +813,7 @@ typedef struct { #define sparc_neg(ins,reg) sparc_sub((ins),FALSE,sparc_g0,(reg),(reg)) #define sparc_clr_reg(ins,reg) sparc_or((ins),FALSE,sparc_g0,sparc_g0,(reg)) -#define sparc_mov_reg_reg(ins,src,dest) sparc_or_imm((ins),FALSE,(src),0,(dest)) +#define sparc_mov_reg_reg(ins,src,dest) sparc_or((ins),FALSE,sparc_g0,(src),(dest)) #endif /* __SPARC_CODEGEN_H__ */ -- cgit v1.1 From a97ef493bb1e42b3afa548e47e3e14afe028b3ef Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Tue, 16 Mar 2004 16:03:49 +0000 Subject: Add x86-64 svn path=/trunk/mono/; revision=24131 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index acf603d..d9c5478 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa +DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa x86-64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From 01dc8bdaddab8f9b1c939716c36d13a35cf2494d Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Tue, 16 Mar 2004 16:16:07 +0000 Subject: Added back svn path=/trunk/mono/; revision=24133 --- amd64/Makefile.am | 7 + amd64/tramp.c | 1055 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1062 insertions(+) create mode 100644 amd64/Makefile.am create mode 100644 amd64/tramp.c diff --git a/amd64/Makefile.am b/amd64/Makefile.am new file mode 100644 index 0000000..54499b5 --- /dev/null +++ b/amd64/Makefile.am @@ -0,0 +1,7 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-amd64.la + +libmonoarch_amd64_la_SOURCES = tramp.c amd64-codegen.h + diff --git a/amd64/tramp.c b/amd64/tramp.c new file mode 100644 index 0000000..cfe3ff1 --- /dev/null +++ b/amd64/tramp.c @@ -0,0 +1,1055 @@ +/* + * Create trampolines to invoke arbitrary functions. + * + * Copyright (C) Ximian Inc. + * + * Author: + * Zalman Stern + * Based on code by: + * Paolo Molaro (lupus@ximian.com) + * Dietmar Maurer (dietmar@ximian.com) + * + * To understand this code, one will want to read the calling convention section of the ABI spec at: + * http://x86-64.org/abi.pdf + * and the AMD64 architecture docs found at amd.com . + */ + +#include "config.h" +#include +#include +#include "amd64-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" +#include "mono/metadata/marshal.h" + +/* + * The resulting function takes the form: + * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); + */ +#define FUNC_ADDR_POS 8 +#define RETVAL_POS 12 +#define THIS_POS 16 +#define ARGP_POS 20 +#define LOC_POS -4 + +#define ARG_SIZE sizeof (stackval) + +#define MAX_INT_ARG_REGS 6 +#define MAX_FLOAT_ARG_REGS 8 + +// TODO get these right. They are upper bounds anyway, so it doesn't much matter. +#define PUSH_INT_STACK_ARG_SIZE 16 +#define MOVE_INT_REG_ARG_SIZE 16 +#define PUSH_FLOAT_STACK_ARG_SIZE 16 +#define MOVE_FLOAT_REG_ARG_SIZE 16 +#define COPY_STRUCT_STACK_ARG_SIZE 16 + +/* Maps an argument number (starting at 0) to the register it is passed in (if it fits). + * E.g. int foo(int bar, int quux) has the bar arg in RDI and the quux arg in RSI + * There is no such map for floating point args as they go in XMM0-XMM7 in order and thus the + * index is the register number.
+ */ +static int int_arg_regs[] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 }; + +/* This next block of code resolves the ABI rules for passing structures in the argument registers. + * These basically amount to "Use up to two registers if they are all integer or all floating point. + * If the structure is bigger than two registers or would be in one integer register and one floating point, + * it is passed in memory instead." + * + * It is possible this code needs to be recursive to be correct in the case when one of the structure members + * is itself a structure. + * + * The 80-bit floating point stuff is ignored. + */ +typedef enum { + ARG_IN_MEMORY, + ARG_IN_INT_REGS, + ARG_IN_FLOAT_REGS +} struct_arg_type; + +static struct_arg_type compute_arg_type(MonoType *type) +{ + guint32 simpletype = type->type; + + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + return ARG_IN_INT_REGS; + break; + case MONO_TYPE_VALUETYPE: { + if (type->data.klass->enumtype) + return ARG_IN_INT_REGS; + return ARG_IN_MEMORY; + break; + } + case MONO_TYPE_R4: + case MONO_TYPE_R8: + return ARG_IN_FLOAT_REGS; + break; + default: + g_error ("Can't trampoline 0x%x", type->type); + } + + return ARG_IN_MEMORY; +} + +static struct_arg_type value_type_info(MonoClass *klass, int *native_size, int *regs_used, int *offset1, int *size1, int *offset2, int *size2) +{ + MonoMarshalType *info = mono_marshal_load_type_info (klass); + + *native_size = info->native_size; + + if (info->native_size > 8 || info->num_fields > 2) + { + *regs_used = 0; + *offset1 = -1; + *offset2 = -1; + return ARG_IN_MEMORY; + } + + if (info->num_fields == 1) + { + struct_arg_type result = compute_arg_type(info->fields[0].field->type); + if (result != ARG_IN_MEMORY) + { + *regs_used = 1; + *offset1 = info->fields[0].offset; + *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); + } + else + { + *regs_used = 0; + *offset1 = -1; + } + + *offset2 = -1; + return result; + } + + struct_arg_type result1 = compute_arg_type(info->fields[0].field->type); + struct_arg_type result2 = compute_arg_type(info->fields[1].field->type); + + if (result1 == result2 && result1 != ARG_IN_MEMORY) + { + *regs_used = 2; + *offset1 = info->fields[0].offset; + *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); + *offset2 = info->fields[1].offset; + *size2 = mono_marshal_type_size (info->fields[1].field->type, info->fields[1].mspec, NULL, 1, 1); + return result1; + } + + return ARG_IN_MEMORY; +} + +MonoPIFunc +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +{ + unsigned char *p, *code_buffer; + guint32 stack_size = 0, code_size = 50; + guint32 arg_pos, simpletype; + int i; + static GHashTable *cache = NULL; + MonoPIFunc res; + + guint32 int_arg_regs_used = 0; + guint32 float_arg_regs_used = 0; + guint32 next_int_arg_reg = 0; + guint32 next_float_arg_reg = 0; + /* Indicates that the return value is filled in inside the called function.
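The two-register rule that compute_arg_type and value_type_info implement above can be restated compactly. The sketch below is an editor's stand-alone restatement over a toy field description (FieldKind, classify and the example structs are invented names, and the 8-byte / two-field cutoffs mirror this commit's code rather than the full ABI classification):

#include <stdio.h>

typedef enum { F_INT, F_FLOAT } FieldKind;
typedef enum { IN_MEMORY, IN_INT_REGS, IN_FLOAT_REGS } Where;

static Where classify (int native_size, int nfields, const FieldKind *f)
{
	int i;

	/* as committed: more than 8 bytes or more than two fields -> memory */
	if (native_size > 8 || nfields > 2)
		return IN_MEMORY;
	/* mixed integer/float pairs also go to memory */
	for (i = 1; i < nfields; i++)
		if (f [i] != f [0])
			return IN_MEMORY;
	return f [0] == F_INT ? IN_INT_REGS : IN_FLOAT_REGS;
}

int main (void)
{
	FieldKind two_ints [2] = { F_INT, F_INT };   /* e.g. struct { int a, b; } */
	FieldKind mixed [2]    = { F_INT, F_FLOAT }; /* e.g. struct { int a; float b; } */

	printf ("%d %d\n",
	        classify (8, 2, two_ints),  /* 1 = IN_INT_REGS */
	        classify (8, 2, mixed));    /* 0 = IN_MEMORY */
	return 0;
}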
*/ + int retval_implicit = 0; + char *arg_in_reg_bitvector; /* A set index by argument number saying if it is in a register + (integer or floating point according to type) */ + + if (!cache) + cache = g_hash_table_new ((GHashFunc)mono_signature_hash, + (GCompareFunc)mono_metadata_signature_equal); + + if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig))) + return res; + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) { + int_arg_regs_used++; + code_size += MOVE_INT_REG_ARG_SIZE; + } + + if (sig->hasthis) { + int_arg_regs_used++; + code_size += MOVE_INT_REG_ARG_SIZE; + } + + /* Run through stuff to calculate code size and argument bytes that will be pushed on stack (stack_size). */ + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + if (int_arg_regs_used++ > MAX_INT_ARG_REGS) { + stack_size += 8; + code_size += PUSH_INT_STACK_ARG_SIZE; + } + else + code_size += MOVE_INT_REG_ARG_SIZE; + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + if (arg_type == ARG_IN_INT_REGS && + (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) + { + code_size += MOVE_INT_REG_ARG_SIZE; + int_arg_regs_used += regs_used; + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) + { + code_size += MOVE_FLOAT_REG_ARG_SIZE; + float_arg_regs_used += regs_used; + break; + } + + /* Else item is in memory. */ + + stack_size += size + 7; + stack_size &= ~7; + code_size += COPY_STRUCT_STACK_ARG_SIZE; + + break; + } + case MONO_TYPE_R4: + case MONO_TYPE_R8: + if (float_arg_regs_used++ > MAX_FLOAT_ARG_REGS) { + stack_size += 8; + code_size += PUSH_FLOAT_STACK_ARG_SIZE; + } + else + code_size += MOVE_FLOAT_REG_ARG_SIZE; + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + /* + * FIXME: take into account large return values. + * (Comment carried over from IA32 code. Not sure what it means :-) + */ + + code_buffer = p = alloca (code_size); + + /* + * Standard function prolog. + */ + amd64_push_reg (p, AMD64_RBP); + amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); + /* + * and align to 16 byte boundary... + */ + + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + retval_implicit = 1; + } + } + + if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { + /* Push the retval register so it is saved across the call. It will be addressed via RBP later. */ + amd64_push_reg (p, AMD64_RSI); + stack_size += 8; + } + + /* Ensure stack is 16 byte aligned when entering called function as required by calling convention. 
+ * Getting this wrong results in a general protection fault on an SSE load or store somewhere in the + * code called under the trampoline. + */ + if ((stack_size & 15) != 0) + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 16 - (stack_size & 15)); + + /* + * On entry to generated function: + * RDI has target function address + * RSI has return value location address + * RDX has this pointer address + * RCX has the pointer to the args array. + * + * Inside the stub function: + * R10 holds the pointer to the args + * R11 holds the target function address. + * The return value address is pushed on the stack. + * The this pointer is moved into the first arg register at the start. + * + * Optimization note: we could keep the args pointer in RCX and then + * load over itself at the end. Ditto the callee address could be left in RDI in some cases. + */ + + /* Move args pointer to temp register. */ + amd64_mov_reg_reg (p, AMD64_R10, AMD64_RCX, 8); + amd64_mov_reg_reg (p, AMD64_R11, AMD64_RDI, 8); + + /* First args register gets return value pointer, if need be. + * Note that "byref" equals true means the called function returns a pointer. + */ + if (retval_implicit) { + amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RSI, 8); + next_int_arg_reg++; + } + + /* this pointer goes in next args register. */ + if (sig->hasthis) { + amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RDX, 8); + next_int_arg_reg++; + } + + /* + * Generate code to handle arguments in registers. Stack arguments will happen in a loop after this. + */ + arg_in_reg_bitvector = (char *)alloca((sig->param_count + 7) / 8); + memset(arg_in_reg_bitvector, 0, (sig->param_count + 7) / 8); + + /* First, load all the arguments that are passed in registers into the appropriate registers. + * Below there is another loop to handle arguments passed on the stack.
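The i >> 3 / i & 7 addressing used for arg_in_reg_bitvector above is worth seeing in isolation. An editor's sketch (names invented) that marks one argument as register-resident and checks a neighbouring one:

#include <assert.h>
#include <string.h>

int main (void)
{
	char bits [4];   /* one bit per argument, room for 32 */
	int i = 11;

	memset (bits, 0, sizeof (bits));
	bits [i >> 3] |= (char)(1 << (i & 7));            /* arg 11 went to a register */
	assert ((bits [11 >> 3] & (1 << (11 & 7))) != 0);
	assert ((bits [12 >> 3] & (1 << (12 & 7))) == 0); /* arg 12 untouched */
	return 0;
}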
+ */ + for (i = 0; i < sig->param_count; i++) { + arg_pos = ARG_SIZE * i; + + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_marshal: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_CLASS: + if (next_int_arg_reg < MAX_INT_ARG_REGS) { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos, 8); + next_int_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_R4: + if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { + amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); + next_float_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_R8: + if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); + next_float_arg_reg++; + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + } + break; + case MONO_TYPE_VALUETYPE: { + if (!sig->params [i]->data.klass->enumtype) { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS && + (next_int_arg_reg + regs_used) <= MAX_INT_ARG_REGS) + { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset1, size1); + next_int_arg_reg++; + if (regs_used > 1) + { + amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset2, size2); + next_int_arg_reg++; + } + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (next_float_arg_reg + regs_used) <= MAX_FLOAT_ARG_REGS) + { + if (size1 == 4) + amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); + else + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); + next_float_arg_reg++; + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset2); + else + amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset2); + next_float_arg_reg++; + } + arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); + break; + } + + /* Structs in memory are handled in the next loop. */ + } else { + /* it's an enum value */ + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_marshal; + } + break; + } + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + /* Handle stack arguments, pushing the rightmost argument first. 
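Why the stack loop that follows runs from sig->param_count down to 1: pushing the rightmost stack-resident argument first leaves the leftmost one at the lowest address, which is where the System V callee expects it. A minimal editor's simulation of that ordering:

#include <stdio.h>

int main (void)
{
	long stack [3];
	int sp = 3;                /* simulated stack pointer; pushes grow down */
	long a = 1, b = 2, c = 3;  /* three arguments that spilled to the stack */

	stack [--sp] = c;          /* rightmost pushed first */
	stack [--sp] = b;
	stack [--sp] = a;          /* leftmost pushed last, ends up lowest */
	printf ("%ld %ld %ld\n", stack [0], stack [1], stack [2]); /* 1 2 3 */
	return 0;
}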
*/ + for (i = sig->param_count; i > 0; --i) { + arg_pos = ARG_SIZE * (i - 1); + if (sig->params [i - 1]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i - 1]->type; +enum_marshal2: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_I8: + case MONO_TYPE_U8: + case MONO_TYPE_CLASS: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_R4: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_R8: + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { + amd64_push_membase (p, AMD64_R10, arg_pos); + } + break; + case MONO_TYPE_VALUETYPE: + if (!sig->params [i - 1]->data.klass->enumtype) { + if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) + { + int ss = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); + ss += 7; + ss &= ~7; + + amd64_alu_reg_imm(p, X86_SUB, AMD64_RSP, ss); + /* Count register */ + amd64_mov_reg_imm(p, AMD64_RCX, ss); + /* Source register */ + amd64_lea_membase(p, AMD64_RSI, AMD64_R10, arg_pos); + /* Dest register */ + amd64_mov_reg_reg(p, AMD64_RDI, AMD64_RSP, 8); + + /* AMD64 calling convention guarantees direction flag is clear at call boundary. */ + x86_prefix(p, AMD64_REX(AMD64_REX_W)); + x86_prefix(p, X86_REP_PREFIX); + x86_movsb(p); + } + } else { + /* it's an enum value */ + simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; + goto enum_marshal2; + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); + } + } + + /* TODO: Set RAL to number of XMM registers used in case this is a varags function? */ + + /* + * Insert call to function + */ + amd64_call_reg (p, AMD64_R11); + + if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { + amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, 8); + } + /* + * Handle retval. + * Small integer and pointer values are in EAX. + * Long integers are in EAX:EDX. + * FP values are on the FP stack. 
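Two notes on the code just above, both editor's observations. First, the TODO about "RAL" presumably means AL, which the varargs convention requires to hold an upper bound on the number of vector registers used. Second, the retval comment is inherited from the IA32 trampoline and does not describe AMD64: here small integers come back in RAX (the X86_EAX constant names the same register number 0, which is why the switch below can reuse it) and floating point comes back in XMM0 via SSE, not on the x87 stack, exactly as the movss/movsd stores below show. A one-line check of the constant aliasing (values per the enums in these headers):

#include <assert.h>

int main (void)
{
	/* X86_EAX (x86-codegen.h) and AMD64_RAX (amd64-codegen.h) are both 0,
	   so amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8) stores RAX. */
	enum { X86_EAX_VALUE = 0, AMD64_RAX_VALUE = 0 };
	assert (X86_EAX_VALUE == AMD64_RAX_VALUE);
	return 0;
}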
+ */ + + if (sig->ret->byref || string_ctor) { + simpletype = MONO_TYPE_PTR; + } else { + simpletype = sig->ret->type; + } + enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 1); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 2); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + case MONO_TYPE_PTR: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); + break; + case MONO_TYPE_R4: + amd64_movss_regp_reg (p, AMD64_RSI, AMD64_XMM0); + break; + case MONO_TYPE_R8: + amd64_movsd_regp_reg (p, AMD64_RSI, AMD64_XMM0); + break; + case MONO_TYPE_I8: + amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8); + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS) + { + amd64_mov_membase_reg (p, AMD64_RSI, offset1, AMD64_RAX, size1); + if (regs_used > 1) + amd64_mov_membase_reg (p, AMD64_RSI, offset2, AMD64_RDX, size2); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS) + { + if (size1 == 4) + amd64_movss_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); + else + amd64_movsd_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0); + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); + else + amd64_movsd_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1); + } + break; + } + + /* Else result should have been stored in place already. */ + break; + } + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + + /* + * Standard epilog. + */ + amd64_leave (p); + amd64_ret (p); + + g_assert (p - code_buffer < code_size); + res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer); + + g_hash_table_insert (cache, sig, res); + + return res; +} + +/* + * Returns a pointer to a native function that can be used to + * call the specified method. + * The function created will receive the arguments according + * to the call convention specified in the method. + * This function works by creating a MonoInvocation structure, + * filling the fields in and calling ves_exec_method on it. + * Still need to figure out how to handle the exception stuff + * across the managed/unmanaged boundary. + */ +void * +mono_arch_create_method_pointer (MonoMethod *method) +{ + MonoMethodSignature *sig; + MonoJitInfo *ji; + unsigned char *p, *code_buffer; + guint32 simpletype; + gint32 local_size; + gint32 stackval_pos; + gint32 mono_invocation_pos; + int i, cpos; + int *vtbuf; + int *rbpoffsets; + int int_arg_regs_used = 0; + int float_arg_regs_used = 0; + int stacked_args_size = 0; /* bytes of register passed arguments pushed on stack for safe keeping. Used to get alignment right. */ + int next_stack_arg_rbp_offset = 16; + int retval_ptr_rbp_offset = 0; + int this_reg = -1; /* Remember register this ptr is in. */ + + /* + * If it is a static P/Invoke method, we can just return the pointer + * to the method implementation. 
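One apparent slip in the VALUETYPE return-value branch above (the same pattern recurs in mono_arch_create_method_pointer below): value_type_info is invoked on sig->params [i]->data.klass even though what is being classified is the return type, and i is a stale loop index at that point. The intended call is presumably the following (editor's correction, not in the commit):

arg_type = value_type_info (sig->ret->data.klass, &size, &regs_used,
                            &offset1, &size1, &offset2, &size2);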
+ */ + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = 1; + ji->code_start = method->addr; + + mono_jit_info_table_add (mono_root_domain, ji); + return method->addr; + } + + sig = method->signature; + + code_buffer = p = alloca (512); /* FIXME: check for overflows... */ + vtbuf = alloca (sizeof(int)*sig->param_count); + rbpoffsets = alloca (sizeof(int)*sig->param_count); + + + /* + * Standard function prolog. + */ + amd64_push_reg (p, AMD64_RBP); + amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); + + /* If there is an implicit return value pointer in the first args reg, save it now so + * the result can be stored through the pointer at the end. + */ + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) + { + amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); + int_arg_regs_used++; + stacked_args_size += 8; + retval_ptr_rbp_offset = -stacked_args_size; + } + + /* + * If there is a this pointer, remember the number of the register it is in. + */ + if (sig->hasthis) { + this_reg = int_arg_regs[int_arg_regs_used++]; + } + + /* Put all arguments passed in registers on the stack. + * Record offsets from RBP to each argument. + */ + cpos = 0; + + for (i = 0; i < sig->param_count; i++) { + if (sig->params [i]->byref) + simpletype = MONO_TYPE_PTR; + else + simpletype = sig->params [i]->type; +enum_calc_size: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_CHAR: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_PTR: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_I8: + if (int_arg_regs_used < MAX_INT_ARG_REGS) { + amd64_push_reg (p, int_arg_regs[int_arg_regs_used]); + int_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + case MONO_TYPE_VALUETYPE: { + if (sig->params [i]->data.klass->enumtype) { + simpletype = sig->params [i]->data.klass->enum_basetype->type; + goto enum_calc_size; + } + else + { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS && + (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) + { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); + stacked_args_size += size; + rbpoffsets[i] = stacked_args_size; + + amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset1, size1); + int_arg_regs_used++; + if (regs_used > 1) + { + amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset2, size2); + int_arg_regs_used++; + } + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS && + (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) + { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size); + stacked_args_size += size; + rbpoffsets[i] = stacked_args_size; + + if (size1 == 4) + amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); + else + amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1); + float_arg_regs_used++; + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); + 
else + amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); + float_arg_regs_used++; + } + break; + } + + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += size; + } + break; + } + case MONO_TYPE_R4: + if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); + amd64_movss_regp_reg (p, AMD64_RSP, float_arg_regs_used); + float_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + case MONO_TYPE_R8: + stacked_args_size += 8; + if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); + amd64_movsd_regp_reg (p, AMD64_RSP, float_arg_regs_used); + float_arg_regs_used++; + stacked_args_size += 8; + rbpoffsets[i] = -stacked_args_size; + } + else + { + rbpoffsets[i] = next_stack_arg_rbp_offset; + next_stack_arg_rbp_offset += 8; + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1) + stacked_args_size; + + local_size += 15; + local_size &= ~15; + + stackval_pos = -local_size; + mono_invocation_pos = stackval_pos + sizeof (stackval) * (sig->param_count + 1); + + /* stacked_args_size has already been pushed onto the stack. Make room for the rest of it. */ + amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, local_size - stacked_args_size); + + /* Be careful not to trash any arg regs before saving this_reg to MonoInvocation structure below. */ + + /* + * Initialize MonoInvocation fields, first the ones known now. + */ + amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, child)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, 8); + /* + * Set the method pointer. + */ + amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, 8); + + /* + * Handle this. + */ + if (sig->hasthis) + amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, 8); + + /* + * Handle the arguments. stackval_pos is the offset from RBP of the stackval in the MonoInvocation args array . + * arg_pos is the offset from RBP to the incoming arg on the stack. + * We just call stackval_from_data to handle all the (nasty) issues.... 
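Each pass of the emitted loop below is one call to stackval_from_data; written directly (with stackvals and frame_base as illustrative stand-ins for the RBP-relative locations the generated code actually addresses) it amounts to:

/* argument registers used, per int_arg_regs[]: RDI, RSI, RDX, RCX */
for (i = 0; i < sig->param_count; ++i)
	stackval_from_data (sig->params [i],              /* MonoType *type   (RDI) */
	                    &stackvals [i],               /* stackval *result (RSI) */
	                    frame_base + rbpoffsets [i],  /* char *data       (RDX) */
	                    sig->pinvoke);                /* gboolean pinvoke (RCX) */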
+ */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, 8); + for (i = 0; i < sig->param_count; ++i) { +/* Need to call stackval_from_data (MonoType *type, stackval *result, char *data, gboolean pinvoke); */ + amd64_mov_reg_imm (p, AMD64_R11, stackval_from_data); + amd64_mov_reg_imm (p, int_arg_regs[0], sig->params[i]); + amd64_lea_membase (p, int_arg_regs[1], AMD64_RBP, stackval_pos); + amd64_lea_membase (p, int_arg_regs[2], AMD64_RBP, rbpoffsets[i]); + amd64_mov_reg_imm (p, int_arg_regs[3], sig->pinvoke); + amd64_call_reg (p, AMD64_R11); + stackval_pos += sizeof (stackval); +#if 0 + /* fixme: alignment */ + if (sig->pinvoke) + arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + arg_pos += mono_type_stack_size (sig->params [i], &align); +#endif + } + + /* + * Handle the return value storage area. + */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, 8); + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, 8); + amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, 8); + } + } + + /* + * Call the method. + */ + amd64_lea_membase (p, int_arg_regs[0], AMD64_RBP, mono_invocation_pos); + amd64_mov_reg_imm (p, AMD64_R11, ves_exec_method); + amd64_call_reg (p, AMD64_R11); + + /* + * Move the return value to the proper place. + */ + amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); + if (sig->ret->byref) { + amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); + } else { + int simpletype = sig->ret->type; + enum_retvalue: + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 1); + break; + case MONO_TYPE_CHAR: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 2); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 4); + break; + case MONO_TYPE_I8: + amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); + break; + case MONO_TYPE_R4: + amd64_movss_regp_reg (p, AMD64_RAX, AMD64_XMM0); + break; + case MONO_TYPE_R8: + amd64_movsd_regp_reg (p, AMD64_RAX, AMD64_XMM0); + break; + case MONO_TYPE_VALUETYPE: { + int size; + int arg_type; + int regs_used; + int offset1; + int size1; + int offset2; + int size2; + + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + + arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); + + if (arg_type == ARG_IN_INT_REGS) + { + if (regs_used > 1) + amd64_mov_membase_reg (p, AMD64_RAX, offset2, AMD64_RDX, size2); + amd64_mov_membase_reg (p, AMD64_RAX, offset1, AMD64_RAX, size1); + break; + } + + if (arg_type == ARG_IN_FLOAT_REGS) + { + if (size1 == 4) + amd64_movss_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); + else + amd64_movsd_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0); + + if (regs_used > 1) + { + if (size2 == 4) + amd64_movss_membase_reg (p, 
AMD64_RAX, offset2, AMD64_XMM1); + else + amd64_movsd_membase_reg (p, AMD64_RAX, offset2, AMD64_XMM1); + } + break; + } + + /* Else result should have been stored in place already. IA32 code has a stackval_to_data call here, which + * looks wrong to me as the pointer in the stack val being converted is setup to point to the output area anyway. + * It all looks a bit suspect anyway. + */ + break; + } + default: + g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); + break; + } + } + + /* + * Standard epilog. + */ + amd64_leave (p); + amd64_ret (p); + + g_assert (p - code_buffer < 512); + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = g_memdup (code_buffer, p - code_buffer); + + mono_jit_info_table_add (mono_root_domain, ji); + + return ji->code_start; +} -- cgit v1.1 From ce4b3b024bba2c8bd4d874a75ef7aa23e118abf7 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Tue, 16 Mar 2004 16:16:35 +0000 Subject: Rename, since stupid cvs gets confused with the dash in x86-64 svn path=/trunk/mono/; revision=24134 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index d9c5478..876b2fe 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa x86-64 +DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa amd64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From 49a337364d8413d2528fe97e68f16ef610bb3c6a Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Tue, 16 Mar 2004 16:20:03 +0000 Subject: Add svn path=/trunk/mono/; revision=24136 --- amd64/amd64-codegen.h | 409 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 amd64/amd64-codegen.h diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h new file mode 100644 index 0000000..68bcfec --- /dev/null +++ b/amd64/amd64-codegen.h @@ -0,0 +1,409 @@ +/* + * amd64-codegen.h: Macros for generating x86 code + * + * Authors: + * Paolo Molaro (lupus@ximian.com) + * Intel Corporation (ORP Project) + * Sergey Chaban (serge@wildwestsoftware.com) + * Dietmar Maurer (dietmar@ximian.com) + * Patrik Torstensson + * Zalman Stern + * + * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal. + * + * Copyright (C) 2000 Intel Corporation. All rights reserved. + * Copyright (C) 2001, 2002 Ximian, Inc. 
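The header that follows is organized around the REX prefix, so one worked byte is useful up front (editor's example): for mov %rax, %r8 the operand is 64-bit (REX.W) and the destination r8 sits in the ModRM r/m field (REX.B), giving 0x40 | 0x8 | 0x1 = 0x49 and the full encoding 49 89 c0 — exactly what the amd64_emit_rex macro below computes.

#include <stdio.h>

int main (void)
{
	unsigned char rex_w = 8, rex_b = 1;   /* per AMD64_REX_Bits below */
	unsigned char rex = (unsigned char)(0x40 | rex_w | rex_b);

	printf ("%02x\n", rex);               /* prints 49 */
	return 0;
}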
+ */ + +#ifndef AMD64_H +#define AMD64_H + +typedef enum { + AMD64_RAX = 0, + AMD64_RCX = 1, + AMD64_RDX = 2, + AMD64_RBX = 3, + AMD64_RSP = 4, + AMD64_RBP = 5, + AMD64_RSI = 6, + AMD64_RDI = 7, + AMD64_R8 = 8, + AMD64_R9 = 9, + AMD64_R10 = 10, + AMD64_R11 = 11, + AMD64_R12 = 12, + AMD64_R13 = 13, + AMD64_R14 = 14, + AMD64_R15 = 15, + AMD64_NREG +} AMD64_Reg_No; + +typedef enum { + AMD64_XMM0 = 0, + AMD64_XMM1 = 1, + AMD64_XMM2 = 2, + AMD64_XMM3 = 3, + AMD64_XMM4 = 4, + AMD64_XMM5 = 5, + AMD64_XMM6 = 6, + AMD64_XMM7 = 7, + AMD64_XMM8 = 8, + AMD64_XMM9 = 9, + AMD64_XMM10 = 10, + AMD64_XMM11 = 11, + AMD64_XMM12 = 12, + AMD64_XMM13 = 13, + AMD64_XMM14 = 14, + AMD64_XMM15 = 15, + AMD64_XMM_NREG = 16, +} AMD64_XMM_Reg_No; + +typedef enum +{ + AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ + AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ + AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ + AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ +} AMD64_REX_Bits; + +#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) +#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \ + { \ + unsigned char _amd64_rex_bits = \ + (((width) > 4) ? AMD64_REX_W : 0) | \ + (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ + (((reg_index) > 7) ? AMD64_REX_X : 0) | \ + (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ + if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + } + +typedef union { + long val; + unsigned char b [8]; +} amd64_imm_buf; + +#include "../x86/x86-codegen.h" + + +/* Need to fill this info in for amd64. */ + +#if 0 +/* +// bitvector mask for callee-saved registers +*/ +#define X86_ESI_MASK (1< Date: Tue, 16 Mar 2004 19:22:52 +0000 Subject: 2004-03-16 Zoltan Varga * sparc/sparc-codegen.h: Add v9 branches with prediction. svn path=/trunk/mono/; revision=24153 --- ChangeLog | 4 ++++ sparc/sparc-codegen.h | 48 +++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index f46096b..69a7ab9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-03-16 Zoltan Varga + + * sparc/sparc-codegen.h: Add v9 branches with prediction.
+ 2004-03-15 Bernie Solomon * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index b91cf5b..e479879 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -167,6 +167,11 @@ typedef enum { } SparcCC; typedef enum { + sparc_icc_short = 0, + sparc_xcc_short = 2 +} SparcCCShort; + +typedef enum { /* fop1 format */ sparc_fitos_val = 196, sparc_fitod_val = 200, @@ -232,6 +237,16 @@ typedef struct { typedef struct { unsigned int op : 2; /* always 0 */ unsigned int a : 1; + unsigned int cond : 4; + unsigned int op2 : 3; + unsigned int cc01 : 2; + unsigned int p : 1; + unsigned int d19 : 19; +} sparc_format2c; + +typedef struct { + unsigned int op : 2; /* always 0 */ + unsigned int a : 1; unsigned int res : 1; unsigned int rcond: 3; unsigned int op2 : 3; @@ -239,7 +254,7 @@ typedef struct { unsigned int p : 1; unsigned int rs1 : 5; unsigned int d16lo: 14; -} sparc_format2c; +} sparc_format2d; typedef struct { unsigned int op : 2; /* 2 or 3 */ @@ -340,6 +355,7 @@ typedef struct { #define sparc_is_imm13(val) ((gint)val >= (gint)-(1<<12) && (gint)val <= (gint)((1<<12)-1)) #define sparc_is_imm22(val) ((gint)val >= (gint)-(1<<21) && (gint)val <= (gint)((1<<21)-1)) #define sparc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) +#define sparc_is_imm19(val) ((gint)val >= (gint)-(1<<18) && (gint)val <= (gint)((1<<18)-1)) /* disassembly */ #define sparc_inst_op(inst) ((inst) >> 30) @@ -379,11 +395,24 @@ typedef struct { (ins) = (unsigned int*)__f + 1; \ } while (0) -#define sparc_encode_format2c(ins,aval,bcond,oper,predict,r1,disp16) \ +#define sparc_encode_format2c(ins,aval,bcond,oper,xcc,predict,disp19) \ do { \ sparc_format2c *__f = (sparc_format2c*)(ins); \ __f->op = 0; \ __f->a = (aval); \ + __f->cond = (bcond); \ + __f->op2 = (oper); \ + __f->cc01 = (xcc); \ + __f->p = (predict); \ + __f->d19 = (disp19); \ + (ins) = (unsigned int*)__f + 1; \ + } while (0) + +#define sparc_encode_format2d(ins,aval,bcond,oper,predict,r1,disp16) \ + do { \ + sparc_format2d *__f = (sparc_format2d*)(ins); \ + __f->op = 0; \ + __f->a = (aval); \ __f->res = 0; \ __f->rcond = (bcond); \ __f->op2 = (oper); \ @@ -752,13 +781,14 @@ typedef struct { #define sparc_branch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),2,(displ)) /* FIXME: float condition codes are different: unify. 
*/ #define sparc_fbranch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),6,(displ)) - -#define sparc_brz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x1,0x3,(predict),(rs1),(disp)) -#define sparc_brlez(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x2,0x3,(predict),(rs1),(disp)) -#define sparc_brlz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x3,0x3,(predict),(rs1),(disp)) -#define sparc_brnz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x5,0x3,(predict),(rs1),(disp)) -#define sparc_brgz(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x6,0x3,(predict),(rs1),(disp)) -#define sparc_brgez(ins,aval,predict,rs1,disp) sparc_encode_format2c((ins), (aval),0x7,0x3,(predict),(rs1),(disp)) +#define sparc_branchp(ins,aval,condval,xcc,predict,displ) sparc_encode_format2c((ins),(aval),(condval),0x1,(xcc),(predict),(displ)) + +#define sparc_brz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x1,0x3,(predict),(rs1),(disp)) +#define sparc_brlez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x2,0x3,(predict),(rs1),(disp)) +#define sparc_brlz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x3,0x3,(predict),(rs1),(disp)) +#define sparc_brnz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x5,0x3,(predict),(rs1),(disp)) +#define sparc_brgz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x6,0x3,(predict),(rs1),(disp)) +#define sparc_brgez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x7,0x3,(predict),(rs1),(disp)) /* conditional moves */ #define sparc_movcc(ins,cc,condval,r1,dest) sparc_encode_format4c((ins), 0x2, 0x2c, cc, condval, r1, dest) -- cgit v1.1 From 73296dcd03106668c5db4511948983bdadeaee2f Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Tue, 23 Mar 2004 22:01:55 +0000 Subject: 2004-03-23 Bernie Solomon * hppa/hppa-codegen.h: created * hppa/tramp.c: changed style to be more like other platforms. * hppa/Makefile.am: add hppa-codegen.h svn path=/trunk/mono/; revision=24504 --- ChangeLog | 9 + hppa/Makefile.am | 2 +- hppa/hppa-codegen.h | 213 +++++++++++++++++++++ hppa/tramp.c | 543 +++++++++++++++++----------------------------------- 4 files changed, 401 insertions(+), 366 deletions(-) create mode 100644 hppa/hppa-codegen.h diff --git a/ChangeLog b/ChangeLog index 69a7ab9..c2f9a07 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,12 @@ +2004-03-23 Bernie Solomon + + * hppa/hppa-codegen.h: created + + * hppa/tramp.c: changed style to be more like + other platforms. + + * hppa/Makefile.am: add hppa-codegen.h + 2004-03-16 Zoltan Varga * sparc/sparc-codegen.h: Add v9 branches with prediction. 
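A usage sketch for sparc_branchp from the hunk above (editor's addition; `buf` is illustrative). 0x9 is the standard SPARC integer condition value for bne, sparc_xcc_short selects the 64-bit condition codes, the next argument is the predict bit, and the displacement counts instruction words:

unsigned int buf [4];
unsigned int *code = buf;

/* bne,pt %xcc, .+16  --  annul=0, cond=0x9 (bne), predict taken, 4 words */
sparc_branchp (code, 0, 0x9, sparc_xcc_short, 1, 4);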
diff --git a/hppa/Makefile.am b/hppa/Makefile.am index a867bcd..7e671cd 100644 --- a/hppa/Makefile.am +++ b/hppa/Makefile.am @@ -3,5 +3,5 @@ INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-hppa.la -libmonoarch_hppa_la_SOURCES = tramp.c +libmonoarch_hppa_la_SOURCES = tramp.c hppa-codegen.h diff --git a/hppa/hppa-codegen.h b/hppa/hppa-codegen.h new file mode 100644 index 0000000..9afd3dd --- /dev/null +++ b/hppa/hppa-codegen.h @@ -0,0 +1,213 @@ +typedef enum { + hppa_r0 = 0, + hppa_r1, + hppa_r2, + hppa_rp = hppa_r2, + hppa_r3, + hppa_r4, + hppa_r5, + hppa_r6, + hppa_r7, + hppa_r8, + hppa_r9, + hppa_r10, + hppa_r11, + hppa_r12, + hppa_r13, + hppa_r14, + hppa_r15, + hppa_r16, + hppa_r17, + hppa_r18, + hppa_r19, + hppa_r20, + hppa_r21, + hppa_r22, + hppa_r23, + hppa_r24, + hppa_r25, + hppa_r26, + hppa_r27, + hppa_r28, + hppa_r29, + hppa_ap = hppa_r29, + hppa_r30, + hppa_sp = hppa_r30, + hppa_r31 +} HPPAIntRegister; + +#define hppa_nop(p); \ + do { \ + *(p) = 0x08000240; \ + p++; \ + } while (0) + +#define hppa_ldb(p, disp, base, dest); \ + do { \ + int neg = (disp) < 0; \ + *(p) = (0x40000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_stb(p, src, disp, base) \ + do { \ + int neg = (disp) < 0; \ + *(p) = (0x60000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_ldh(p, disp, base, dest) \ + do { \ + int neg = (disp) < 0; \ + g_assert(((disp) & 1) == 0); \ + *(p) = (0x44000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_sth(p, src, disp, base) \ + do { \ + int neg = (disp) < 0; \ + g_assert(((disp) & 1) == 0); \ + *(p) = (0x64000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_ldw(p, disp, base, dest) \ + do { \ + int neg = (disp) < 0; \ + g_assert(((disp) & 3) == 0); \ + *(p) = (0x48000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_stw(p, src, disp, base) \ + do { \ + int neg = (disp) < 0; \ + g_assert(((disp) & 3) == 0); \ + *(p) = (0x68000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_copy(p, src, dest) \ + do { \ + *(p) = (0x34000000 | ((src) << 21) | ((dest) << 16)); \ + p++; \ + } while (0) + +#define hppa_ldd_with_flags(p, disp, base, dest, m, a) \ + do { \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + g_assert(((disp) & 7) == 0); \ + *(p) = (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p++; \ + } while (0) + +#define hppa_ldd(p, disp, base, dest) \ + hppa_ldd_with_flags(p, disp, base, dest, 0, 0) + +#define hppa_ldd_mb(p, disp, base, dest) \ + hppa_ldd_with_flags(p, disp, base, dest, 1, 1) + +#define hppa_std_with_flags(p, src, disp, base, m, a); \ + do { \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + g_assert(((disp) & 7) == 0); \ + *(p) = (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 
0x4 : 0)); \ + p++; \ + } while (0) + +#define hppa_std(p, disp, base, dest) \ + hppa_std_with_flags(p, disp, base, dest, 0, 0) + +#define hppa_std_ma(p, disp, base, dest) \ + hppa_std_with_flags(p, disp, base, dest, 1, 0) + +#define hppa_fldd_with_flags(p, disp, base, dest, m, a) \ + do { \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 2; \ + *(p) = (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p++; \ + } while (0) + +#define hppa_fldd(p, disp, base, dest) \ + hppa_fldd_with_flags(p, disp, base, dest, 0, 0) + +#define hppa_fstd_with_flags(p, src, disp, base, m, a) \ + do { \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 2; \ + *(p) = (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p++; \ + } while (0) + +#define hppa_fstd(p, disp, base, dest) \ + hppa_fstd_with_flags(p, disp, base, dest, 0, 0) + + +#define hppa_fldw_with_flags(p, im11a, base, dest, r) \ + do { \ + int neg = (disp) < 0; \ + int im11a = (disp) >> 2; \ + *(p) = (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)); \ + p++; \ + } while (0) + +#define hppa_fldw(p, disp, base, dest) \ + hppa_fldw_with_flags(p, disp, base, dest, 1) + +#define hppa_fstw_with_flags(p, src, disp, base, r) \ + do { \ + int neg = (disp) < 0; \ + int im11a = (disp) >> 2; \ + *(p) = (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)); \ + p++; \ + } while (0) + +#define hppa_fstw(p, src, disp, base) \ + hppa_fstw_with_flags(p, src, disp, base, 1) + +/* only works on right half SP registers */ +#define hppa_fcnv(p, src, ssng, dest, dsng) \ + do { \ + *(p) = (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)); \ + p++; \ + } while (0) + +#define hppa_fcnv_sng_dbl(p, src, dest) \ + hppa_fcnv(p, src, 1, dest, 0) + +#define hppa_fcnv_dbl_sng(p, src, dest) \ + hppa_fcnv(p, src, 0, dest, 1) + +#define hppa_ldil(p, val, dest) \ + do { \ + unsigned int t = (val >> 11) & 0x1fffff; \ + unsigned int im21 = ((t & 0x7c) << 14) | ((t & 0x180) << 7) | ((t & 0x3) << 12) | ((t & 0xffe00) >> 8) | ((t & 0x100000) >> 20); \ + *(p) = (0x20000000 | im21 | ((dest) << 21)); \ + p++; \ + } while (0) + +#define hppa_ldo(p, off, base, dest) \ + do { \ + int neg = (off) < 0; \ + *(p) = (0x34000000 | (((off) & 0x1fff)) << 1 | ((base) << 21) | ((dest) << 16) | neg); \ + p++; \ + } while (0) + +#define hppa_extrdu(p, src, pos, len, dest) \ + do { \ + *(p) = (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ? 0x1000 : 0) | (32 - (len & 31))); \ + p++; \ + } while (0) + +#define hppa_bve(p, reg, link) \ + do { \ + *(p) = (0xE8001000 | ((link ? 
7 : 6) << 13) | ((reg) << 21)); \ + p++; \ + } while (0) + +#define hppa_blve(p, reg) \ + hppa_bve(p, reg, 1) diff --git a/hppa/tramp.c b/hppa/tramp.c index 0604eb6..64baa2a 100644 --- a/hppa/tramp.c +++ b/hppa/tramp.c @@ -27,6 +27,7 @@ #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" #include "mono/metadata/tabledefs.h" +#include "hppa-codegen.h" #if SIZEOF_VOID_P != 8 #error "HPPA code only currently supports 64bit pointers" @@ -35,219 +36,6 @@ // debugging flag which dumps code generated static int debug_asm = 0; -#define NOP 0x08000240 - -#define LDB(disp, base, dest, neg) (0x40000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) -#define STB(src, disp, base, neg) (0x60000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) - -#define LDH(disp, base, dest, neg) (0x44000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) -#define STH(src, disp, base, neg) (0x64000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) - -#define LDW(disp, base, dest, neg) (0x48000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg) -#define STW(src, disp, base, neg) (0x68000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg) - -#define COPY(src, dest) (0x34000000 | ((src) << 21) | ((dest) << 16)) -#define LDD(im10a, base, dest, m, a, neg) (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) -#define STD(src, im10a, base, m , a, neg) (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) - -#define FLDD(im10a, base, dest, m, a, neg) (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) -#define FSTD(src, im10a, base, m , a, neg) (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)) - -#define FLDW(im11a, base, dest, r, neg) (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)) -#define FSTW(src, im11a, base, r, neg) (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)) - -/* only works on right half SP registers */ -#define FCNV(src, ssng, dest, dsng) (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)) - -#define LDIL(im21, dest) (0x20000000 | im21 | ((dest) << 21)) - -#define LDO(off, base, dest, neg) (0x34000000 | (((off) & 0x1fff)) << 1 | ((base) << 21) | ((dest) << 16) | neg) - -#define EXTRDU(src, pos, len, dest) (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ? 0x1000 : 0) | (32 - (len & 31))) - -#define BVE(reg, link) (0xE8001000 | ((link ? 
7 : 6) << 13) | ((reg) << 21)) - -static unsigned int gen_copy(int src, int dest) -{ - if (debug_asm) - fprintf(stderr, "COPY %d,%d\n", src, dest); - return COPY(src, dest); -} - -static unsigned int gen_ldb(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "LDB %d(%d),%d\n", disp, base, dest); - return LDB(disp, base, dest, neg); -} - -static unsigned int gen_stb(int src, int disp, int base) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "STB %d,%d(%d)\n", src, disp, base); - return STB(src, disp, base, neg); -} - -static unsigned int gen_ldh(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "LDH %d(%d),%d\n", disp, base, dest); - g_assert((disp & 1) == 0); - return LDH(disp, base, dest, neg); -} - -static unsigned int gen_sth(int src, int disp, int base) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "STH %d,%d(%d)\n", src, disp, base); - g_assert((disp & 1) == 0); - return STH(src, disp, base, neg); -} - -static unsigned int gen_ldw(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "LDW %d(%d),%d\n", disp, base, dest); - g_assert((disp & 3) == 0); - return LDW(disp, base, dest, neg); -} - -static unsigned int gen_stw(int src, int disp, int base) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "STW %d,%d(%d)\n", src, disp, base); - g_assert((disp & 3) == 0); - return STW(src, disp, base, neg); -} - -static unsigned int gen_ldd(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "LDD %d(%d),%d\n", disp, base, dest); - g_assert((disp & 7) == 0); - return LDD(disp >> 3, base, dest, 0, 0, neg); -} - -static unsigned int gen_lddmb(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "LDD,MB %d(%d),%d\n", disp, base, dest); - g_assert((disp & 7) == 0); - return LDD(disp >> 3, base, dest, 1, 1, neg); -} - -static unsigned int gen_std(int src, int disp, int base) -{ - int neg = disp < 0; - g_assert((disp & 7) == 0); - if (debug_asm) - fprintf(stderr, "STD %d,%d(%d)\n", src, disp, base); - return STD(src, disp >> 3, base, 0, 0, neg); -} - -static unsigned int gen_fldd(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "FLDD %d(%d),%d\n", disp, base, dest); - g_assert((disp & 7) == 0); - return FLDD(disp >> 3, base, dest, 0, 0, neg); -} - -static unsigned int gen_fstd(int src, int disp, int base) -{ - int neg = disp < 0; - g_assert((disp & 7) == 0); - if (debug_asm) - fprintf(stderr, "FSTD %d,%d(%d)\n", src, disp, base); - return FSTD(src, disp >> 3, base, 0, 0, neg); -} - -static unsigned int gen_fldw(int disp, int base, int dest) -{ - int neg = disp < 0; - if (debug_asm) - fprintf(stderr, "FLDW %d(%d),%dr\n", disp, base, dest); - g_assert((disp & 3) == 0); - return FLDW(disp >> 2, base, dest, 1, neg); -} - -static unsigned int gen_fstw(int src, int disp, int base) -{ - int neg = disp < 0; - g_assert((disp & 3) == 0); - if (debug_asm) - fprintf(stderr, "FSTW %dr,%d(%d)\n", src, disp, base); - return FSTW(src, disp >> 2, base, 1, neg); -} - -static unsigned int gen_fcnv_dbl_sng(int src, int dest) -{ - if (debug_asm) - fprintf(stderr, "FCNV,DBL,SGL %d,%dr\n", src, dest); - return FCNV(src, 0, dest, 1); -} - -static unsigned int gen_fcnv_sng_dbl(int src, int dest) -{ - if (debug_asm) - fprintf(stderr, "FCNV,SGL,DBL %dr,%d\n", src, dest); - return FCNV(src, 1, dest, 0); -} - -static unsigned int gen_stdma(int src, int disp, int base) 
-{ - int neg = disp < 0; - g_assert((disp & 7) == 0); - if (debug_asm) - fprintf(stderr, "STD,MA %d,%d(%d)\n", src, disp, base); - return STD(src, disp >> 3, base, 1, 0, neg); -} - -/* load top 21 bits of val into reg */ -static unsigned int gen_ldil(unsigned int val, int reg) -{ - unsigned int t = (val >> 11) & 0x1fffff; - unsigned int im21 = ((t & 0x7c) << 14) | ((t & 0x180) << 7) | ((t & 0x3) << 12) | ((t & 0xffe00) >> 8) | ((t & 0x100000) >> 20); - return LDIL(reg, im21); -} - -static unsigned int gen_ldo(int off, int base, int reg) -{ - int neg = off < 0; - if (debug_asm) - fprintf(stderr, "LDO %d(%d),%d\n", off, base, reg); - return LDO(off, base, reg, neg); -} - -static unsigned int gen_nop(void) -{ - if (debug_asm) - fprintf(stderr, "NOP\n"); - return NOP; -} - -static unsigned int gen_bve(int reg, int link) -{ - if (debug_asm) - fprintf(stderr, "BVE%s (%d)%s\n", link ? ",L" : "", reg, link ? ",2" : ""); - return BVE(reg, link); -} - -static unsigned int gen_extrdu(int src, int pos, int len, int dest) -{ - if (debug_asm) - fprintf(stderr, "EXTRD,U %d,%d,%d,%d\n", src, pos, len, dest); - return EXTRDU(src, pos, len, dest); -} static void flush_cache(void *address, int length) { @@ -288,7 +76,21 @@ static void flush_cache(void *address, int length) #endif } -#define ADD_INST(code, pc, gen_exp) ((code) == NULL ? (pc)++ : (code[(pc)++] = (gen_exp))) +static void disassemble (guint32 *code, int n_instrs) +{ + const char *tmp_file = "/tmp/mono_adb.in"; + FILE *fp = fopen(tmp_file, "w"); + int i; + for (i = 0; i < n_instrs; i++) + fprintf(fp, "0x%08x=i\n", code[i]); + fprintf(fp, "$q\n"); + fclose(fp); + system("adb64 param_count > 0) - ADD_INST(code, pc, gen_copy(23, 4)); // r4 is the current pointer to the stackval array of args + ADD_INST(code, pc, hppa_copy(code, 23, 4)); // r4 is the current pointer to the stackval array of args if (sig->hasthis) { if (sig->call_convention != MONO_CALL_THISCALL) { - ADD_INST(code, pc, gen_copy(24, arg_reg)); + ADD_INST(code, pc, hppa_copy(code, 24, arg_reg)); --arg_reg; parameter_slot += 8; } else { @@ -396,10 +199,10 @@ generate: int type = sig->params[param]->type; if (sig->params[param]->byref) { if (args_on_stack) { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); - ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); } else { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); --arg_reg; } arg_offset += sizeof(stackval); @@ -417,25 +220,25 @@ generate: case MONO_TYPE_I4: case MONO_TYPE_U4: if (args_on_stack) { - ADD_INST(code, pc, gen_ldw(arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_ldw(code, arg_offset, 4, 5)); switch (type) { case MONO_TYPE_I4: case MONO_TYPE_U4: - ADD_INST(code, pc, gen_stw(5, parameter_slot + 4, 30)); + ADD_INST(code, pc, hppa_stw(code, 5, parameter_slot + 4, 30)); break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: - ADD_INST(code, pc, gen_sth(5, parameter_slot + 6, 30)); + ADD_INST(code, pc, hppa_sth(code, 5, parameter_slot + 6, 30)); break; case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - ADD_INST(code, pc, gen_stb(5, parameter_slot + 7, 30)); + ADD_INST(code, pc, hppa_stb(code, 5, parameter_slot + 7, 30)); break; } } else { - ADD_INST(code, pc, gen_ldw(arg_offset, 4, arg_reg)); + ADD_INST(code, pc, hppa_ldw(code, arg_offset, 4, arg_reg)); --arg_reg; } arg_offset += sizeof(stackval); @@ -451,10 +254,10 @@ generate: 
case MONO_TYPE_SZARRAY: case MONO_TYPE_PTR: if (args_on_stack) { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); - ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); } else { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); --arg_reg; } arg_offset += sizeof(stackval); @@ -462,10 +265,10 @@ generate: break; case MONO_TYPE_R8: if (args_on_stack) { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); - ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); } else { - ADD_INST(code, pc, gen_fldd(arg_offset, 4, FP_ARG_REG(arg_reg))); + ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, FP_ARG_REG(arg_reg))); --arg_reg; } arg_offset += sizeof(stackval); @@ -473,12 +276,12 @@ generate: break; case MONO_TYPE_R4: if (args_on_stack) { - ADD_INST(code, pc, gen_fldd(arg_offset, 4, 22)); - ADD_INST(code, pc, gen_fcnv_dbl_sng(22, 22)); - ADD_INST(code, pc, gen_fstw(22, parameter_slot + 4, 30)); + ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, 22)); + ADD_INST(code, pc, hppa_fcnv_dbl_sng(code, 22, 22)); + ADD_INST(code, pc, hppa_fstw(code, 22, parameter_slot + 4, 30)); } else { - ADD_INST(code, pc, gen_fldd(arg_offset, 4, FP_ARG_REG(arg_reg))); - ADD_INST(code, pc, gen_fcnv_dbl_sng(FP_ARG_REG(arg_reg), FP_ARG_REG(arg_reg))); + ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, FP_ARG_REG(arg_reg))); + ADD_INST(code, pc, hppa_fcnv_dbl_sng(code, FP_ARG_REG(arg_reg), FP_ARG_REG(arg_reg))); --arg_reg; } arg_offset += sizeof(stackval); @@ -494,12 +297,12 @@ generate: // copies multiple of 8 bytes which may include some trailing garbage but should be safe if (size <= 8) { if (args_on_stack) { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); - ADD_INST(code, pc, gen_ldd(0, 5, 5)); - ADD_INST(code, pc, gen_std(5, parameter_slot, 30)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_ldd(code, 0, 5, 5)); + ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); } else { - ADD_INST(code, pc, gen_ldd(arg_offset, 4, arg_reg)); - ADD_INST(code, pc, gen_ldd(0, arg_reg, arg_reg)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); + ADD_INST(code, pc, hppa_ldd(code, 0, arg_reg, arg_reg)); --arg_reg; } parameter_slot += 8; @@ -512,15 +315,15 @@ generate: } parameter_slot += 8; } - ADD_INST(code, pc, gen_ldd(arg_offset, 4, 5)); + ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); // might generate a lot of code for very large structs... 
should // use a loop or routine call them while (size > 0) { if (args_on_stack) { - ADD_INST(code, pc, gen_ldd(soffset, 5, 31)); - ADD_INST(code, pc, gen_std(31, parameter_slot, 30)); + ADD_INST(code, pc, hppa_ldd(code, soffset, 5, 31)); + ADD_INST(code, pc, hppa_std(code, 31, parameter_slot, 30)); } else { - ADD_INST(code, pc, gen_ldd(soffset, 5, arg_reg)); + ADD_INST(code, pc, hppa_ldd(code, soffset, 5, arg_reg)); --arg_reg; if (arg_reg < 19) args_on_stack = 1; @@ -548,23 +351,23 @@ generate: if (sig->ret->type == MONO_TYPE_VALUETYPE && sig->ret->data.klass->enumtype == 0) { int size = mono_class_native_size (sig->ret->data.klass, NULL); if (size > 16) { - ADD_INST(code, pc, gen_ldd(-56, 3, 28)); - ADD_INST(code, pc, gen_ldd(0, 28, 28)); + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 28)); + ADD_INST(code, pc, hppa_ldd(code, 0, 28, 28)); } } - ADD_INST(code, pc, gen_nop()); // NOP - ADD_INST(code, pc, gen_ldd(-64, 29, 5)); - ADD_INST(code, pc, gen_ldd(24, 5, 27)); - ADD_INST(code, pc, gen_ldd(16, 5, 5)); - ADD_INST(code, pc, gen_bve(5, 1)); - ADD_INST(code, pc, gen_ldo(parameter_offset + 64, 30, 29)); - ADD_INST(code, pc, gen_ldd(spill_offset + 16, 30, 27)); - ADD_INST(code, pc, gen_nop()); // NOP + ADD_INST(code, pc, hppa_nop(code)); // NOP + ADD_INST(code, pc, hppa_ldd(code, -64, 29, 5)); + ADD_INST(code, pc, hppa_ldd(code, 24, 5, 27)); + ADD_INST(code, pc, hppa_ldd(code, 16, 5, 5)); + ADD_INST(code, pc, hppa_blve(code, 5)); + ADD_INST(code, pc, hppa_ldo(code, parameter_offset + 64, 30, 29)); + ADD_INST(code, pc, hppa_ldd(code, spill_offset + 16, 30, 27)); + ADD_INST(code, pc, hppa_nop(code)); // NOP if (string_ctor) { - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_std(28, 0, 19)); // STD %r28,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_std(code, 28, 0, 19)); // STD %r28,0(%r19) } else if (sig->ret->type != MONO_TYPE_VOID) { int type = sig->ret->type; @@ -574,19 +377,19 @@ generate: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_stb(28, 0, 19)); // STB %r28,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_stb(code, 28, 0, 19)); // STB %r28,0(%r19) break; case MONO_TYPE_I4: case MONO_TYPE_U4: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_stw(28, 0, 19)); // STW %r28,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_stw(code, 28, 0, 19)); // STW %r28,0(%r19) break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_sth(28, 0, 19)); // STH %r28,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_sth(code, 28, 0, 19)); // STH %r28,0(%r19) break; case MONO_TYPE_I8: case MONO_TYPE_U8: @@ -597,16 +400,16 @@ generate: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_PTR: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_std(28, 0, 19)); // STD %r28,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_std(code, 28, 0, 19)); // STD %r28,0(%r19) break; case MONO_TYPE_R8: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_fstd(4, 0, 19)); // FSTD %fr4,0(%r19) + 
ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_fstd(code, 4, 0, 19)); // FSTD %fr4,0(%r19) break; case MONO_TYPE_R4: - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); // LDD -56(%r3),%r19 - ADD_INST(code, pc, gen_fstw(4, 0, 19)); // FSTW %fr4r,0(%r19) + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19 + ADD_INST(code, pc, hppa_fstw(code, 4, 0, 19)); // FSTW %fr4r,0(%r19) break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { @@ -617,28 +420,28 @@ generate: if (size <= 16) { int reg = 28; int off = 0; - ADD_INST(code, pc, gen_ldd(-56, 3, 19)); - ADD_INST(code, pc, gen_ldd(0, 19, 19)); + ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); + ADD_INST(code, pc, hppa_ldd(code, 0, 19, 19)); if (size > 8) { - ADD_INST(code, pc, gen_std(28, 0, 19)); + ADD_INST(code, pc, hppa_std(code, 28, 0, 19)); size -= 8; reg = 29; off += 8; } // get rest of value right aligned in the register - ADD_INST(code, pc, gen_extrdu(reg, 8 * size - 1, 8 * size, reg)); + ADD_INST(code, pc, hppa_extrdu(code, reg, 8 * size - 1, 8 * size, reg)); if ((size & 1) != 0) { - ADD_INST(code, pc, gen_stb(reg, off + size - 1, 19)); - ADD_INST(code, pc, gen_extrdu(reg, 55, 56, reg)); + ADD_INST(code, pc, hppa_stb(code, reg, off + size - 1, 19)); + ADD_INST(code, pc, hppa_extrdu(code, reg, 55, 56, reg)); size -= 1; } if ((size & 2) != 0) { - ADD_INST(code, pc, gen_sth(reg, off + size - 2, 19)); - ADD_INST(code, pc, gen_extrdu(reg, 47, 48, reg)); + ADD_INST(code, pc, hppa_sth(code, reg, off + size - 2, 19)); + ADD_INST(code, pc, hppa_extrdu(code, reg, 47, 48, reg)); size -= 2; } if ((size & 4) != 0) - ADD_INST(code, pc, gen_stw(reg, off + size - 4, 19)); + ADD_INST(code, pc, hppa_stw(code, reg, off + size - 4, 19)); } break; } @@ -648,29 +451,32 @@ generate: } } - ADD_INST(code, pc, gen_ldd(-frame_size-16, 30, 2)); - ADD_INST(code, pc, gen_ldd(spill_offset, 30, 4)); - ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 5)); - ADD_INST(code, pc, gen_bve(2, 0)); - ADD_INST(code, pc, gen_lddmb(-frame_size, 30, 3)); + ADD_INST(code, pc, hppa_ldd(code, -frame_size-16, 30, 2)); + ADD_INST(code, pc, hppa_ldd(code, spill_offset, 30, 4)); + ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 5)); + ADD_INST(code, pc, hppa_bve(code, 2, 0)); + ADD_INST(code, pc, hppa_ldd_mb(code, -frame_size, 30, 3)); if (code == NULL) { descriptor = (void **)g_malloc(4 * sizeof(void *) + pc * sizeof(unsigned int)); code = (unsigned int *)((char *)descriptor + 4 * sizeof(void *)); + code_start = code; save_pc = pc; goto generate; } else g_assert(pc == save_pc); - if (debug_asm) + if (debug_asm) { fprintf(stderr, "generated: %d bytes\n", pc * 4); + disassemble(code_start, pc); + } // must do this so we can actually execute the code we just put in memory - flush_cache(code, 4 * pc); + flush_cache(code_start, 4 * pc); descriptor[0] = 0; descriptor[1] = 0; - descriptor[2] = code; + descriptor[2] = code_start; descriptor[3] = 0; return (MonoPIFunc)descriptor; @@ -687,6 +493,7 @@ mono_arch_create_method_pointer (MonoMethod *method) void **descriptor = NULL; void **data = NULL; unsigned int *code = NULL; + unsigned int *code_start = NULL; int arg_reg = 26; int arg_offset = 0; int frame_size; @@ -741,37 +548,37 @@ generate: arg_val_pos = -64; pc = 0; - ADD_INST(code, pc, gen_std(2, -16, 30)); - ADD_INST(code, pc, gen_stdma(3, frame_size, 30)); - ADD_INST(code, pc, gen_std(4, spill_offset, 30)); - ADD_INST(code, pc, gen_copy(29, 3)); - ADD_INST(code, pc, gen_std(27, spill_offset + 8, 30)); - 
ADD_INST(code, pc, gen_std(28, spill_offset + 16, 30)); - ADD_INST(code, pc, gen_nop()); - - ADD_INST(code, pc, gen_std(26, -64, 29)); // STD %r26,-64(%r29) - ADD_INST(code, pc, gen_std(25, -56, 29)); // STD %r25,-56(%r29) - ADD_INST(code, pc, gen_std(24, -48, 29)); // STD %r24,-48(%r29) - ADD_INST(code, pc, gen_std(23, -40, 29)); // STD %r23,-40(%r29) - ADD_INST(code, pc, gen_std(22, -32, 29)); // STD %r22,-32(%r29) - ADD_INST(code, pc, gen_std(21, -24, 29)); // STD %r21,-24(%r29) - ADD_INST(code, pc, gen_std(20, -16, 29)); // STD %r20,-16(%r29) - ADD_INST(code, pc, gen_std(19, -8, 29)); // STD %r19,-8(%r29) - - ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, parent), 30)); - ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, child), 30)); - ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex), 30)); - ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex_handler), 30)); - ADD_INST(code, pc, gen_std(0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ip), 30)); + ADD_INST(code, pc, hppa_std(code, 2, -16, 30)); + ADD_INST(code, pc, hppa_std_ma(code, 3, frame_size, 30)); + ADD_INST(code, pc, hppa_std(code, 4, spill_offset, 30)); + ADD_INST(code, pc, hppa_copy(code, 29, 3)); + ADD_INST(code, pc, hppa_std(code, 27, spill_offset + 8, 30)); + ADD_INST(code, pc, hppa_std(code, 28, spill_offset + 16, 30)); + ADD_INST(code, pc, hppa_nop(code)); + + ADD_INST(code, pc, hppa_std(code, 26, -64, 29)); // STD %r26,-64(%r29) + ADD_INST(code, pc, hppa_std(code, 25, -56, 29)); // STD %r25,-56(%r29) + ADD_INST(code, pc, hppa_std(code, 24, -48, 29)); // STD %r24,-48(%r29) + ADD_INST(code, pc, hppa_std(code, 23, -40, 29)); // STD %r23,-40(%r29) + ADD_INST(code, pc, hppa_std(code, 22, -32, 29)); // STD %r22,-32(%r29) + ADD_INST(code, pc, hppa_std(code, 21, -24, 29)); // STD %r21,-24(%r29) + ADD_INST(code, pc, hppa_std(code, 20, -16, 29)); // STD %r20,-16(%r29) + ADD_INST(code, pc, hppa_std(code, 19, -8, 29)); // STD %r19,-8(%r29) + + ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, parent), 30)); + ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, child), 30)); + ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex), 30)); + ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex_handler), 30)); + ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ip), 30)); if (data != NULL) data[0] = method; - ADD_INST(code, pc, gen_ldd(0, 27, 19)); - ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, method), 30)); + ADD_INST(code, pc, hppa_ldd(code, 0, 27, 19)); + ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, method), 30)); if (sig->hasthis) { if (sig->call_convention != MONO_CALL_THISCALL) { - ADD_INST(code, pc, gen_std(arg_reg, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, obj), 30)); + ADD_INST(code, pc, hppa_std(code, arg_reg, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, obj), 30)); arg_val_pos += 8; } else { fprintf(stderr, "case I didn't handle 2\n"); @@ -784,10 +591,10 @@ generate: for (i = 0; i < sig->param_count; ++i) { if (data != NULL) data[4 + i] = sig->params[i]; - ADD_INST(code, pc, gen_ldd((4 + i) * 8, 27, 26)); // LDD x(%r27),%r26 == type - ADD_INST(code, pc, gen_ldo(stack_val_pos, 30, 
25)); // LDD x(%r30),%r25 == &stackval + ADD_INST(code, pc, hppa_ldd(code, (4 + i) * 8, 27, 26)); // LDD x(%r27),%r26 == type + ADD_INST(code, pc, hppa_ldo(code, stack_val_pos, 30, 25)); // LDD x(%r30),%r25 == &stackval if (sig->params[i]->byref) { - ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); } else { int type = sig->params[i]->type; typeswitch: @@ -802,21 +609,21 @@ generate: case MONO_TYPE_SZARRAY: case MONO_TYPE_PTR: case MONO_TYPE_R8: - ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); break; case MONO_TYPE_I4: case MONO_TYPE_U4: - ADD_INST(code, pc, gen_ldo(arg_val_pos + 4, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 4, 3, 24)); break; case MONO_TYPE_CHAR: case MONO_TYPE_I2: case MONO_TYPE_U2: - ADD_INST(code, pc, gen_ldo(arg_val_pos + 6, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 6, 3, 24)); break; case MONO_TYPE_I1: case MONO_TYPE_U1: case MONO_TYPE_BOOLEAN: - ADD_INST(code, pc, gen_ldo(arg_val_pos + 7, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 7, 3, 24)); break; case MONO_TYPE_VALUETYPE: if (sig->params [i]->data.klass->enumtype) { @@ -825,11 +632,11 @@ generate: } else { int size = mono_class_native_size (sig->params[i]->data.klass, NULL); if (size <= 8) - ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); else { arg_val_pos += 15; arg_val_pos &= ~15; - ADD_INST(code, pc, gen_ldo(arg_val_pos, 3, 24)); + ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); } arg_val_pos += size; @@ -837,8 +644,8 @@ generate: arg_val_pos &= ~7; arg_val_pos -=8 ; // as it is incremented later - ADD_INST(code, pc, gen_ldo(vtoffsets[i], 30, 19)); - ADD_INST(code, pc, gen_std(19, 0, 25)); + ADD_INST(code, pc, hppa_ldo(code, vtoffsets[i], 30, 19)); + ADD_INST(code, pc, hppa_std(code, 19, 0, 25)); } break; default: @@ -847,44 +654,44 @@ generate: } } - ADD_INST(code, pc, gen_ldo(sig->pinvoke, 0, 23)); // LDI sig->pinvoke,%r23 - ADD_INST(code, pc, gen_ldd(16, 27, 19)); // LDD x(%r27),%r19 == stackval_from_data - ADD_INST(code, pc, gen_ldd(16, 19, 20)); // LDD 16(%r19),%r20 - ADD_INST(code, pc, gen_ldd(24, 19, 27)); // LDD 24(%r19),%r27 - ADD_INST(code, pc, gen_bve(20, 1)); // BVE,L (%r20),%r2 - ADD_INST(code, pc, gen_ldo(-16, 30, 29)); // LDO -16(%r30),%r29 - ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 27)); + ADD_INST(code, pc, hppa_ldo(code, sig->pinvoke, 0, 23)); // LDI sig->pinvoke,%r23 + ADD_INST(code, pc, hppa_ldd(code, 16, 27, 19)); // LDD x(%r27),%r19 == stackval_from_data + ADD_INST(code, pc, hppa_ldd(code, 16, 19, 20)); // LDD 16(%r19),%r20 + ADD_INST(code, pc, hppa_ldd(code, 24, 19, 27)); // LDD 24(%r19),%r27 + ADD_INST(code, pc, hppa_blve(code, 20)); // BVE,L (%r20),%r2 + ADD_INST(code, pc, hppa_ldo(code, -16, 30, 29)); // LDO -16(%r30),%r29 + ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 27)); stack_val_pos += sizeof (stackval); arg_val_pos += 8; g_assert(stack_val_pos < -96); } - ADD_INST(code, pc, gen_ldo(stack_vals_offset, 30, 19)); - ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, stack_args), 30)); - ADD_INST(code, pc, gen_ldo(stack_val_pos, 30, 19)); - ADD_INST(code, pc, gen_std(19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, retval), 30)); + ADD_INST(code, pc, hppa_ldo(code, stack_vals_offset, 30, 19)); + ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, 
stack_args), 30)); + ADD_INST(code, pc, hppa_ldo(code, stack_val_pos, 30, 19)); + ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, retval), 30)); if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) { int size = mono_class_native_size (sig->ret->data.klass, NULL); // for large return structs pass on the pointer given us by our caller. if (size > 16) - ADD_INST(code, pc, gen_ldd(spill_offset + 16, 30, 28)); + ADD_INST(code, pc, hppa_ldd(code, spill_offset + 16, 30, 28)); else // use space left on stack for the return value - ADD_INST(code, pc, gen_ldo(stack_val_pos + sizeof(stackval), 30, 28)); - ADD_INST(code, pc, gen_std(28, stack_val_pos, 30)); + ADD_INST(code, pc, hppa_ldo(code, stack_val_pos + sizeof(stackval), 30, 28)); + ADD_INST(code, pc, hppa_std(code, 28, stack_val_pos, 30)); } - ADD_INST(code, pc, gen_ldo(invoke_rec_offset, 30, 26)); // address of invocation + ADD_INST(code, pc, hppa_ldo(code, invoke_rec_offset, 30, 26)); // address of invocation if (data != NULL) data[1] = (void *)ves_exec_method; - ADD_INST(code, pc, gen_ldd(8, 27, 19)); // LDD 8(%r27),%r19 - ADD_INST(code, pc, gen_ldd(16, 19, 20)); // LDD 16(%r19),%r20 - ADD_INST(code, pc, gen_ldd(24, 19, 27)); // LDD 24(%r19),%r27 - ADD_INST(code, pc, gen_bve(20, 1)); // BVE,L (%r20),%r2 - ADD_INST(code, pc, gen_ldo(-16, 30, 29)); // LDO -16(%r30),%r29 - ADD_INST(code, pc, gen_ldd(spill_offset + 8, 30, 27)); + ADD_INST(code, pc, hppa_ldd(code, 8, 27, 19)); // LDD 8(%r27),%r19 + ADD_INST(code, pc, hppa_ldd(code, 16, 19, 20)); // LDD 16(%r19),%r20 + ADD_INST(code, pc, hppa_ldd(code, 24, 19, 27)); // LDD 24(%r19),%r27 + ADD_INST(code, pc, hppa_blve(code, 20)); // BVE,L (%r20),%r2 + ADD_INST(code, pc, hppa_ldo(code, -16, 30, 29)); // LDO -16(%r30),%r29 + ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 27)); if (sig->ret->byref) { fprintf(stderr, "can'ty cope with ret byref\n"); } else { @@ -901,7 +708,7 @@ generate: case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: - ADD_INST(code, pc, gen_ldw(stack_val_pos, 30, 28)); // LDW x(%r30),%r28 + ADD_INST(code, pc, hppa_ldw(code, stack_val_pos, 30, 28)); // LDW x(%r30),%r28 break; case MONO_TYPE_I8: case MONO_TYPE_U8: @@ -912,10 +719,10 @@ generate: case MONO_TYPE_CLASS: case MONO_TYPE_SZARRAY: case MONO_TYPE_PTR: - ADD_INST(code, pc, gen_ldd(stack_val_pos, 30, 28)); // LDD x(%r30),%r28 + ADD_INST(code, pc, hppa_ldd(code, stack_val_pos, 30, 28)); // LDD x(%r30),%r28 break; case MONO_TYPE_R8: - ADD_INST(code, pc, gen_fldd(stack_val_pos, 30, 4)); // FLDD x(%r30),%fr4 + ADD_INST(code, pc, hppa_fldd(code, stack_val_pos, 30, 4)); // FLDD x(%r30),%fr4 break; case MONO_TYPE_VALUETYPE: if (sig->ret->data.klass->enumtype) { @@ -924,10 +731,10 @@ generate: } else { int size = mono_class_native_size (sig->ret->data.klass, NULL); if (size <= 16) { - ADD_INST(code, pc, gen_ldd(stack_val_pos, 30, 28)); + ADD_INST(code, pc, hppa_ldd(code, stack_val_pos, 30, 28)); if (size > 8) - ADD_INST(code, pc, gen_ldd(8, 28, 29)); - ADD_INST(code, pc, gen_ldd(0, 28, 28)); + ADD_INST(code, pc, hppa_ldd(code, 8, 28, 29)); + ADD_INST(code, pc, hppa_ldd(code, 0, 28, 28)); } } break; @@ -937,22 +744,28 @@ generate: } } - ADD_INST(code, pc, gen_ldd(-frame_size-16, 30, 2)); - ADD_INST(code, pc, gen_ldd(spill_offset, 30, 4)); - ADD_INST(code, pc, gen_bve(2, 0)); - ADD_INST(code, pc, gen_lddmb(-frame_size, 30, 3)); + ADD_INST(code, pc, hppa_ldd(code, -frame_size-16, 30, 2)); + ADD_INST(code, pc, hppa_ldd(code, spill_offset, 30, 4)); + 
ADD_INST(code, pc, hppa_bve(code, 2, 0)); + ADD_INST(code, pc, hppa_ldd_mb(code, -frame_size, 30, 3)); if (code == NULL) { descriptor = (void **)malloc((8 + sig->param_count) * sizeof(void *) + sizeof(unsigned int) * pc); data = descriptor + 4; code = (unsigned int *)(data + 4 + sig->param_count); + code_start = code; goto generate; } - flush_cache(code, 4 * pc); + if (debug_asm) { + fprintf(stderr, "generated: %d bytes\n", pc * 4); + disassemble(code_start, pc); + } + + flush_cache(code_start, 4 * pc); descriptor[0] = 0; descriptor[1] = 0; - descriptor[2] = code; + descriptor[2] = code_start; descriptor[3] = data; ji = g_new0 (MonoJitInfo, 1); -- cgit v1.1 From e82c4f6b16e7d3a7bdabe2df046b7ce17d91e716 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Tue, 30 Mar 2004 01:18:11 +0000 Subject: 2004-03-29 Bernie Solomon * amd64/tramp.c: * arm/tramp.c: * hppa/tramp.c: * ppc/tramp.c: * s390/tramp.c: * sparc/tramp.c: * x86/tramp.c: remove child from MonoInvocation as it isn't used. svn path=/trunk/mono/; revision=24751 --- ChangeLog | 11 +++++++++++ amd64/tramp.c | 1 - arm/tramp.c | 1 - hppa/tramp.c | 1 - ppc/tramp.c | 1 - s390/tramp.c | 3 +-- sparc/tramp.c | 2 -- x86/tramp.c | 1 - 8 files changed, 12 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index c2f9a07..622479e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,14 @@ +2004-03-29 Bernie Solomon + + * amd64/tramp.c: + * arm/tramp.c: + * hppa/tramp.c: + * ppc/tramp.c: + * s390/tramp.c: + * sparc/tramp.c: + * x86/tramp.c: + remove child from MonoInvocation as it isn't used. + 2004-03-23 Bernie Solomon * hppa/hppa-codegen.h: created diff --git a/amd64/tramp.c b/amd64/tramp.c index cfe3ff1..09f80f2 100644 --- a/amd64/tramp.c +++ b/amd64/tramp.c @@ -885,7 +885,6 @@ enum_calc_size: amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, 8); amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, child)), AMD64_RAX, 8); amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, 8); /* * Set the method pointer. diff --git a/arm/tramp.c b/arm/tramp.c index 8bebf1b..f1f0c7c 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -487,7 +487,6 @@ void* mono_arch_create_method_pointer (MonoMethod* method) ARM_MOV_REG_IMM8(p, ARMREG_R4, 0); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex)); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler)); - ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(child)); ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent)); /* Set the method pointer. 
*/ diff --git a/hppa/tramp.c b/hppa/tramp.c index 64baa2a..1c09c9a 100644 --- a/hppa/tramp.c +++ b/hppa/tramp.c @@ -566,7 +566,6 @@ generate: ADD_INST(code, pc, hppa_std(code, 19, -8, 29)); // STD %r19,-8(%r29) ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, parent), 30)); - ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, child), 30)); ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex), 30)); ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex_handler), 30)); ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ip), 30)); diff --git a/ppc/tramp.c b/ppc/tramp.c index 124eabf..4e0c816 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -682,7 +682,6 @@ mono_arch_create_method_pointer (MonoMethod *method) ppc_li (p, ppc_r0, 0); ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), ppc_r31); ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), ppc_r31); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), ppc_r31); ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), ppc_r31); /* set method pointer */ diff --git a/s390/tramp.c b/s390/tramp.c index 912e8fe..50fa1a4 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -819,7 +819,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /* allocate a MonoInvocation structure (inv) on the stack */ /* allocate an array of stackval on the stack with length = */ /* method->signature->param_count + 1 [call it stack_args] */ -/* set inv->ex, inv->ex_handler, inv->child, inv->parent to */ +/* set inv->ex, inv->ex_handler,inv->parent to */ /* NULL */ /* set inv->method to method */ /* if method is an instance method, set inv->obj to the */ @@ -894,7 +894,6 @@ mono_arch_create_method_pointer (MonoMethod *method) s390_lhi (p, s390_r0, 0); s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child))); s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); s390_lhi (p, s390_r0, 1); s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap))); diff --git a/sparc/tramp.c b/sparc/tramp.c index ca6dd08..a90fff4 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -832,8 +832,6 @@ mono_arch_create_method_pointer (MonoMethod *method) sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); sparc_st_imm_ptr (p, sparc_g0, sparc_sp, - (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child))); - sparc_st_imm_ptr (p, sparc_g0, sparc_sp, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); sparc_set_ptr (p, (void *)method, sparc_l0); diff --git a/x86/tramp.c b/x86/tramp.c index 3b2e7c9..ee98193 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -395,7 +395,6 @@ mono_arch_create_method_pointer (MonoMethod *method) x86_mov_reg_imm (p, X86_EAX, 0); x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), X86_EAX, 4); x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), X86_EAX, 4); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, child)), X86_EAX, 4); x86_mov_membase_reg (p, X86_EBP, 
(MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), X86_EAX, 4); /* * Set the method pointer. -- cgit v1.1 From 8adf42aeb550308e5a30e4308ad639fafa27e7e3 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Tue, 30 Mar 2004 01:44:17 +0000 Subject: 2004-03-29 Bernie Solomon * hppa/hppa-codegen.h: fix displacements in FP instrs svn path=/trunk/mono/; revision=24755 --- ChangeLog | 3 +++ hppa/hppa-codegen.h | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 622479e..9daf263 100644 --- a/ChangeLog +++ b/ChangeLog @@ -9,6 +9,9 @@ * x86/tramp.c: remove child from MonoInvocation as it isn't used. + * hppa/hppa-codegen.h: + fix displacements in FP instrs + 2004-03-23 Bernie Solomon * hppa/hppa-codegen.h: created diff --git a/hppa/hppa-codegen.h b/hppa/hppa-codegen.h index 9afd3dd..0a9586f 100644 --- a/hppa/hppa-codegen.h +++ b/hppa/hppa-codegen.h @@ -127,7 +127,7 @@ typedef enum { #define hppa_fldd_with_flags(p, disp, base, dest, m, a) \ do { \ int neg = (disp) < 0; \ - int im10a = (disp) >> 2; \ + int im10a = (disp) >> 3; \ *(p) = (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ p++; \ } while (0) @@ -138,7 +138,7 @@ typedef enum { #define hppa_fstd_with_flags(p, src, disp, base, m, a) \ do { \ int neg = (disp) < 0; \ - int im10a = (disp) >> 2; \ + int im10a = (disp) >> 3; \ *(p) = (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ p++; \ } while (0) -- cgit v1.1 From 9b84c8398a2558c61613ec50d3c3546627ac1e2d Mon Sep 17 00:00:00 2001 From: Raja R Harinath Date: Tue, 13 Apr 2004 04:31:05 +0000 Subject: ignores svn path=/trunk/mono/; revision=25379 --- amd64/.cvsignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 amd64/.cvsignore diff --git a/amd64/.cvsignore b/amd64/.cvsignore new file mode 100644 index 0000000..3dda729 --- /dev/null +++ b/amd64/.cvsignore @@ -0,0 +1,2 @@ +Makefile.in +Makefile -- cgit v1.1 From ab07311f8d1aeb258795fc72c5ed216f603db092 Mon Sep 17 00:00:00 2001 From: David Waite Date: Tue, 27 Apr 2004 04:13:19 +0000 Subject: 2004-04-26 David Waite * unknown.c: modify to have content for defined platforms (to avoid ISO C warning) svn path=/trunk/mono/; revision=26036 --- ChangeLog | 5 +++++ unknown.c | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 9daf263..8aee7f0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2004-04-26 David Waite + + * unknown.c: modify to have content for defined platforms (to + avoid ISO C warning) + 2004-03-29 Bernie Solomon * amd64/tramp.c: diff --git a/unknown.c b/unknown.c index cdb7a4a..d865299 100644 --- a/unknown.c +++ b/unknown.c @@ -1,6 +1,5 @@ -#ifdef NO_PORT #include "mono/interpreter/interp.h" - +#ifdef NO_PORT MonoPIFunc mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { -- cgit v1.1 From 92e3edf52f04c550767f3ae59c0f7fcefb46cbf8 Mon Sep 17 00:00:00 2001 From: "Urs C. Muff" Date: Wed, 28 Apr 2004 03:59:07 +0000 Subject: cleanup svn path=/trunk/mono/; revision=26114 --- amd64/.cvsignore | 1 + 1 file changed, 1 insertion(+) diff --git a/amd64/.cvsignore b/amd64/.cvsignore index 3dda729..e440faf 100644 --- a/amd64/.cvsignore +++ b/amd64/.cvsignore @@ -1,2 +1,3 @@ Makefile.in Makefile +.deps -- cgit v1.1 From f05e6864576c8c9e827cf6affbaff770732628d4 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 29 Apr 2004 18:59:24 +0000 Subject: Fix stmw opcode with signed offsets. 
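The underlying bug: D is a signed int, so a negative displacement such as -8
is sign-extended to 32 bits before it is ORed into the instruction word, and
the stray high bits overwrite the opcode and register fields. Casting through
guint16 confines the displacement to its 16-bit slot. A minimal standalone
sketch of the difference (hypothetical operands, not part of the patch
itself):

	#include <stdio.h>

	typedef unsigned int   guint32;
	typedef unsigned short guint16;

	int main (void)
	{
		guint32 S = 30, A = 1;	/* stmw %r30,-8(%r1) */
		int D = -8;
		guint32 bad  = (47u << 26) | (S << 21) | (A << 16) | D;
		guint32 good = (47u << 26) | (S << 21) | (A << 16) | (guint16)D;
		/* bad  == 0xfffffff8: sign extension wiped out the opcode */
		/* good == 0xbfc1fff8: opcode 47, rS=30, rA=1, d=0xfff8 */
		printf ("bad %08x good %08x\n", bad, good);
		return 0;
	}
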
svn path=/trunk/mono/; revision=26328 --- ppc/ppc-codegen.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index d09afd5..1fb7ce3 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -577,7 +577,7 @@ my and Ximian's copyright to this code. ;) #define ppc_sthu(c,S,A,D) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | D) #define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0) #define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0) -#define ppc_stmw(c,S,A,D) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stmw(c,S,A,D) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)D) #define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0) #define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0) #define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0) @@ -631,8 +631,8 @@ my and Ximian's copyright to this code. ;) #define ppc_xor(c,A,S,B) ppc_xorx(c,A,S,B,0) #define ppc_xord(c,A,S,B) ppc_xorx(c,A,S,B,1) -#define ppc_xori(c,S,A,SIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(SIMM)) -#define ppc_xoris(c,S,A,SIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(SIMM)) +#define ppc_xori(c,S,A,UIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM)) +#define ppc_xoris(c,S,A,UIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM)) /* this marks the end of my work, ct */ -- cgit v1.1 From e79a83571f6126771c5e997560dd7e15c540df3f Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Fri, 30 Apr 2004 03:47:45 +0000 Subject: 2004-04-29 Bernie Solomon * ppc/tramp.c: use sizeof (stackval), fix delegate tramp frame layout for Apple svn path=/trunk/mono/; revision=26383 --- ChangeLog | 5 +++ ppc/tramp.c | 125 +++++++++++++++++++++++++++++++++++++----------------------- 2 files changed, 83 insertions(+), 47 deletions(-) diff --git a/ChangeLog b/ChangeLog index 8aee7f0..d95fb3e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2004-04-29 Bernie Solomon + + * ppc/tramp.c: use sizeof (stackval), fix + delegate tramp frame layout for Apple + 2004-04-26 David Waite * unknown.c: modify to have content for defined platforms (to diff --git a/ppc/tramp.c b/ppc/tramp.c index 4e0c816..f6c037d 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -58,6 +58,46 @@ flush_icache (guint8 *code, guint size) asm ("isync"); } +static void +disassemble (guint8 *code, int size) +{ + int i; + FILE *ofd; + const char *tmp = getenv("TMP"); + char *as_file; + char *o_file; + char *cmd; + + if (tmp == NULL) + tmp = "/tmp"; + as_file = g_strdup_printf ("%s/test.s", tmp); + + if (!(ofd = fopen (as_file, "w"))) + g_assert_not_reached (); + + fprintf (ofd, "tmp:\n"); + + for (i = 0; i < size; ++i) + fprintf (ofd, ".byte %d\n", (unsigned int) code [i]); + + fclose (ofd); +#ifdef __APPLE__ +#define DIS_CMD "otool -V -v -t" +#else +#define DIS_CMD "objdump -d" +#endif + o_file = g_strdup_printf ("%s/test.o", tmp); + cmd = g_strdup_printf ("as %s -o %s", as_file, o_file); + system (cmd); + g_free (cmd); + cmd = g_strdup_printf (DIS_CMD " %s", o_file); + system (cmd); + g_free (cmd); + g_free (o_file); + g_free (as_file); +} + + #define NOT_IMPLEMENTED(x) \ g_error ("FIXME: %s is not yet implemented. 
(trampoline)", x); @@ -279,27 +319,29 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) } #define ARG_BASE ppc_r12 +#define ARG_SIZE sizeof (stackval) #define SAVE_4_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ - gr ++; \ - } else { \ - ppc_lwz (p, ppc_r11, i*16, ARG_BASE); \ - ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ - stack_par_pos += 4; \ - } + if (gr < GENERAL_REGS) { \ + ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); \ + gr ++; \ + ALWAYS_ON_STACK (stack_par_pos += 4); \ + } else { \ + ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); \ + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ + stack_par_pos += 4; \ + } #define SAVE_4_VAL_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); \ - ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); \ - gr ++; \ - ALWAYS_ON_STACK (stack_par_pos += 4); \ - } else { \ - ppc_lwz (p, ppc_r11, i*16, ARG_BASE); \ - ppc_lwz (p, ppc_r11, 0, ppc_r11); \ - ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ - stack_par_pos += 4; \ - } + if (gr < GENERAL_REGS) { \ + ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); \ + ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); \ + gr ++; \ + ALWAYS_ON_STACK (stack_par_pos += 4); \ + } else { \ + ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); \ + ppc_lwz (p, ppc_r11, 0, ppc_r11); \ + ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ + stack_par_pos += 4; \ + } inline static guint8* emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gboolean use_memcpy) @@ -437,18 +479,18 @@ DEBUG(printf("Mono_Type_i8. gr = %d, arg_base = %d\n", gr, ARG_BASE)); gr++; #endif if (gr < 7) { - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); - ppc_lwz (p, ppc_r3 + gr + 1, i*16 + 4, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr + 1, i*ARG_SIZE + 4, ARG_BASE); ALWAYS_ON_STACK (stack_par_pos += 8); } else if (gr == 7) { - ppc_lwz (p, ppc_r3 + gr, i*16, ARG_BASE); - ppc_lwz (p, ppc_r11, i*16 + 4, ARG_BASE); + ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); + ppc_lwz (p, ppc_r11, i*ARG_SIZE + 4, ARG_BASE); ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); stack_par_pos += 8; } else { - ppc_lwz (p, ppc_r11, i*16, ARG_BASE); + ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); - ppc_lwz (p, ppc_r11, i*16 + 4, ARG_BASE); + ppc_lwz (p, ppc_r11, i*ARG_SIZE + 4, ARG_BASE); ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); stack_par_pos += 8; } @@ -456,7 +498,7 @@ DEBUG(printf("Mono_Type_i8. gr = %d, arg_base = %d\n", gr, ARG_BASE)); break; case MONO_TYPE_R4: if (fr < 7) { - ppc_lfs (p, ppc_f1 + fr, i*16, ARG_BASE); + ppc_lfs (p, ppc_f1 + fr, i*ARG_SIZE, ARG_BASE); fr ++; FP_ALSO_IN_REG (gr ++); ALWAYS_ON_STACK (stack_par_pos += 4); @@ -466,7 +508,7 @@ DEBUG(printf("Mono_Type_i8. 
gr = %d, arg_base = %d\n", gr, ARG_BASE)); break; case MONO_TYPE_R8: if (fr < 7) { - ppc_lfd (p, ppc_f1 + fr, i*16, ARG_BASE); + ppc_lfd (p, ppc_f1 + fr, i*ARG_SIZE, ARG_BASE); fr ++; FP_ALSO_IN_REG (gr += 2); ALWAYS_ON_STACK (stack_par_pos += 8); @@ -631,7 +673,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) #ifdef __APPLE__ -#define MINV_POS 24 /* MonoInvocation structure offset on stack */ +#define MINV_POS 40 /* MonoInvocation structure offset on stack - STACK_PARAM_OFFSET + 4 pointer args for stackval_from_data */ #else #define MINV_POS 8 /* MonoInvocation structure offset on stack */ #endif @@ -655,8 +697,8 @@ mono_arch_create_method_pointer (MonoMethod *method) MonoMethodSignature *sig; MonoJitInfo *ji; guint8 *p, *code_buffer; - guint i, align = 0, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param, stack_param, - this_flag, cpos, vt_cur; + guint i, align = 0, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param = 0, stack_param, + cpos, vt_cur; gint *vtbuf; guint32 simpletype; @@ -694,18 +736,12 @@ mono_arch_create_method_pointer (MonoMethod *method) if (sig->hasthis) { ppc_stw (p, ppc_r3, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), ppc_r31); reg_param = 1; - } else if (sig->param_count) { - DEBUG (printf ("save r%d\n", 3)); - ppc_stw (p, ppc_r3, local_pos, ppc_r31); - local_pos += 4; - reg_param = 0; - } + } - this_flag = (sig->hasthis ? 1 : 0); if (sig->param_count) { - gint save_count = MIN (8, sig->param_count - 1); + gint save_count = MIN (8, sig->param_count + sig->hasthis); for (i = reg_param; i < save_count; i ++) { - ppc_stw (p, ppc_r4 + i, local_pos, ppc_r31); + ppc_stw (p, ppc_r3 + i, local_pos, ppc_r31); local_pos += 4; DEBUG (printf ("save r%d\n", 4 + i)); } @@ -744,7 +780,7 @@ mono_arch_create_method_pointer (MonoMethod *method) /* add stackval arguments */ for (i = 0; i < sig->param_count; ++i) { if (reg_param < 8) { - ppc_addi (p, ppc_r5, ppc_r31, local_start + (reg_param - this_flag)*4); + ppc_addi (p, ppc_r5, ppc_r31, local_start + i*4); reg_param ++; } else { ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); @@ -768,13 +804,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ppc_mtlr (p, ppc_r0); ppc_blrl (p); - /* fixme: alignment */ - DEBUG (printf ("arg_pos %d --> ", stackval_arg_pos)); - if (sig->pinvoke) - stackval_arg_pos += 4*mono_type_native_stack_size (sig->params [i], &align); - else - stackval_arg_pos += 4*mono_type_stack_size (sig->params [i], &align); - DEBUG (printf ("%d\n", stackval_arg_pos)); + stackval_arg_pos += sizeof (stackval); } /* return value storage */ @@ -849,6 +879,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ppc_blr (p); /* return */ DEBUG (printf ("emited code size: %d\n", p - code_buffer)); + DEBUG (disassemble (code_buffer, p - code_buffer)); flush_icache (code_buffer, p - code_buffer); DEBUG (printf ("Delegate [end emiting]\n")); -- cgit v1.1 From f4dcc4e46be455a7a289a969529ba4a1cd0bc3f3 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 7 May 2004 19:53:40 +0000 Subject: Bring s390 JIT up to date. 
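Most of the load/store emitters below gain an explicit index-register
operand, following the S/390 RX instruction format: 8-bit opcode, 4-bit
target register, 4-bit index register, 4-bit base register and a 12-bit
unsigned displacement. A rough standalone sketch of how such a word is
packed and unpacked (register choices are made up for illustration):

	#include <stdio.h>

	typedef unsigned int guint32;

	/* RX format, as used by the s390_l / s390_st family of macros */
	static guint32
	rx_encode (guint32 op, guint32 r1, guint32 x2, guint32 b2, guint32 d2)
	{
		return (op << 24) | (r1 << 20) | (x2 << 16) | (b2 << 12) | (d2 & 0xfff);
	}

	int main (void)
	{
		/* L %r2,96(%r15): load the word at offset 96 from the stack pointer */
		guint32 insn = rx_encode (88, 2, 0, 15, 96);
		printf ("insn %08x disp %u base r%u\n",
			insn, insn & 0xfff, (insn >> 12) & 0xf);
		return 0;	/* prints: insn 5820f060 disp 96 base r15 */
	}
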
svn path=/trunk/mono/; revision=26943 --- s390/s390-codegen.h | 253 ++++++++++++++++++++++++++++++++++++++++++++++++---- s390/tramp.c | 252 +++++++++++++++++++-------------------------------- 2 files changed, 326 insertions(+), 179 deletions(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index 5f6c255..9e59033 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -7,6 +7,73 @@ #include #include +#define FLOAT_REGS 2 /* No. float registers for parms */ +#define GENERAL_REGS 5 /* No. general registers for parms */ + +#define ARG_BASE s390_r10 /* Register for addressing arguments*/ +#define STKARG \ + (i*(sizeof(stackval))) /* Displacement of ith argument */ + +#define MINV_POS 96 /* MonoInvocation stack offset */ +#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) +#define OBJ_POS 8 +#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) + +#define MIN_CACHE_LINE 256 + +/*------------------------------------------------------------------*/ +/* Sequence to add an int/long long to parameters to stack_from_data*/ +/*------------------------------------------------------------------*/ +#define ADD_ISTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a float/double to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_RSTACK_PARM(i) \ + if (fpr_param < FLOAT_REGS) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + float_pos + (fpr_param * sizeof(float) * (i))); \ + fpr_param++; \ + } else { \ + stack_param += (stack_param % (i)); \ + s390_la (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a structure ptr to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_TSTACK_PARM \ + if (reg_param < GENERAL_REGS) { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param++; \ + } else { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param++; \ + } + +#define ADD_PSTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param++; \ + } typedef enum { s390_r0 = 0, s390_r1, @@ -49,34 +116,186 @@ typedef enum { s390_fpc = 256, } S390SpecialRegister; -#define s390_word(addr, value) *((guint32 *) addr) = (guint32) (value); ((guint32 *) addr)++ -#define s390_emit16(c, x) *((guint16 *) c) = x; ((guint16 *) c)++ -#define s390_emit32(c, x) *((guint32 *) c) = x; ((guint32 *) c)++ +#define s390_is_imm16(val) ((gint)val >= (gint)-(1<<15) && \ + (gint)val <= (gint)((1<<15)-1)) +#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) +#define s390_is_imm12(val) ((gint)val >= (gint)-(1<<11) && \ + (gint)val <= (gint)((1<<15)-1)) +#define s390_is_uimm12(val) ((gint)val >= 0 && (gint)val <= 
4095) + +#define STK_BASE s390_r15 +#define S390_MINIMAL_STACK_SIZE 96 +#define S390_REG_SAVE_OFFSET 24 +#define S390_RET_ADDR_OFFSET 56 + +#define S390_CC_ZR 8 +#define S390_CC_NE 7 +#define S390_CC_NZ 7 +#define S390_CC_LT 4 +#define S390_CC_GT 2 +#define S390_CC_GE 11 +#define S390_CC_LE 13 +#define S390_CC_OV 1 +#define S390_CC_NO 14 +#define S390_CC_CY 3 +#define S390_CC_NC 12 +#define S390_CC_UN 15 + +#define s390_word(addr, value) do {*((guint32 *) addr) = (guint32) (value); \ + ((guint32 *) addr)++;} while (0) +#define s390_float(addr, value) do {*((guint32 *) addr) = (guint32) (value); \ + ((guint32 *) addr)++;} while (0) +#define s390_llong(addr, value) do {*((guint64 *) addr) = (guint64) (value); \ + ((guint64 *) addr)++;} while (0) +#define s390_double(addr, value) do {*((guint64 *) addr) = (guint64) (value); \ + ((guint64 *) addr)++;} while (0) +#define s390_emit16(c, x) do {*((guint16 *) c) = x; ((guint16 *) c)++;} while(0) +#define s390_emit32(c, x) do {*((guint32 *) c) = x; ((guint32 *) c)++;} while(0) #define s390_basr(code, r1, r2) s390_emit16 (code, (13 << 8 | (r1) << 4 | (r2))) #define s390_bras(code, r, o) s390_emit32 (code, (167 << 24 | (r) << 20 | 5 << 16 | (o))) +#define s390_brasl(code, r, o) do {s390_emit16 (code, (192 << 8 | (r) << 4 | 5)); \ + s390_emit32 (code, (o));} while(0) #define s390_ahi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 10 << 16 | ((v) & 0xffff))) +#define s390_alcr(code, r1, r2) s390_emit32 (code, (185 << 24 | 152 << 16 | (r1) << 4 | (r2))) +#define s390_ar(code, r1, r2) s390_emit16 (code, (26 << 8 | (r1) << 4 | (r2))) +#define s390_alr(code, r1, r2) s390_emit16 (code, (30 << 8 | (r1) << 4 | (r2))) +#define s390_a(code, r, x, b, d) s390_emit32 (code, (90 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_al(code, r, x, b, d) s390_emit32 (code, (94 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_slbr(code, r1, r2) s390_emit32 (code, (185 << 24 | 153 << 16 | (r1) << 4 | (r2))) +#define s390_sr(code, r1, r2) s390_emit16 (code, (27 << 8 | (r1) << 4 | (r2))) +#define s390_slr(code, r1, r2) s390_emit16 (code, (31 << 8 | (r1) << 4 | (r2))) +#define s390_s(code, r, x, b, d) s390_emit32 (code, (91 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_sl(code, r, x, b, d) s390_emit32 (code, (95 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_mr(code, r1, r2) s390_emit16 (code, (28 << 8 | (r1) << 4 | (r2))) +#define s390_m(code, r, x, b, d) s390_emit32 (code, (92 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_msr(code, r1, r2) s390_emit32 (code, (178 << 24 | 82 << 16 | (r1) << 4| (r2))) +#define s390_ms(code, r, x, b, d) s390_emit32 (code, (113 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_mlr(code, r1, r2) s390_emit32 (code, (185 << 24 | 150 << 16 | (r1) << 4| (r2))) +#define s390_dr(code, r1, r2) s390_emit16 (code, (29 << 8 | (r1) << 4 | (r2))) +#define s390_d(code, r, x, b, d) s390_emit32 (code, (93 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_dlr(code, r1, r2) s390_emit32 (code, (185 << 24 | 151 << 16 | (r1) << 4| (r2))) #define s390_br(code, r) s390_emit16 (code, (7 << 8 | 15 << 4 | (r))) #define s390_nr(code, r1, r2) s390_emit16 (code, (20 << 8 | (r1) << 4 | (r2))) +#define s390_n(code, r, x, b, d) s390_emit32 (code, (84 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_or(code, r1, r2) s390_emit16 (code, (22 << 8 | (r1) << 4 | (r2))) 
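/* Aside (not in the original header): the two-byte RR forms above
 * (s390_nr, s390_or, s390_xr, ...) pack opcode(8) r1(4) r2(4) via
 * s390_emit16, while the four-byte RX storage forms that follow
 * (s390_n, s390_o, s390_x, ...) pack opcode(8) r1(4) x2(4) b2(4) d2(12)
 * via s390_emit32. For example, with hypothetical registers:
 *
 *   s390_or (code, s390_r3, s390_r4);          OR %r3,%r4 -> 0x1634
 *   s390_o  (code, s390_r3, 0, STK_BASE, 96);  O  %r3,96(%r15)
 */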
+#define s390_o(code, r, x, b, d) s390_emit32 (code, (86 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_xr(code, r1, r2) s390_emit16 (code, (23 << 8 | (r1) << 4 | (r2))) +#define s390_x(code, r, x, b, d) s390_emit32 (code, (87 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) #define s390_lr(code, r1, r2) s390_emit16 (code, (24 << 8 | (r1) << 4 | (r2))) -#define s390_l(code, r, b, d) s390_emit32 (code, (88 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_ltr(code, r1, r2) s390_emit16 (code, (18 << 8 | (r1) << 4 | (r2))) +#define s390_l(code, r, x, b, d) s390_emit32 (code, (88 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_lcr(code, r1, r2) s390_emit16 (code, (19 << 8 | (r1) << 4 | (r2))) +#define s390_lnr(code, r1, r2) s390_emit16 (code, (17 << 8 | (r1) << 4 | (r2))) +#define s390_lpr(code, r1, r2) s390_emit16 (code, (16 << 8 | (r1) << 4 | (r2))) #define s390_lm(code, r1, r2, b, d) s390_emit32 (code, (152 << 24 | (r1) << 20 | (r2) << 16 \ | (b) << 12 | ((d) & 0xfff))) -#define s390_lh(code, r, b, d) s390_emit32 (code, (72 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_lh(code, r, x, b, d) s390_emit32 (code, (72 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) #define s390_lhi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 8 << 16 | ((v) & 0xffff))) -#define s390_ic(code, r, b, d) s390_emit32 (code, (67 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_st(code, r, b, d) s390_emit32 (code, (80 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_ic(code, r, x, b, d) s390_emit32 (code, (67 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_icm(code, r, m, b, d) s390_emit32 (code, (191 << 24 | (r) << 20 | (m) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_st(code, r, x, b, d) s390_emit32 (code, (80 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) #define s390_stm(code, r1, r2, b, d) s390_emit32 (code, (144 << 24 | (r1) << 20 | (r2) << 16 \ | (b) << 12 | ((d) & 0xfff))) -#define s390_sth(code, r, b, d) s390_emit32 (code, (64 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_stc(code, r, b, d) s390_emit32 (code, (66 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_la(code, r, b, d) s390_emit32 (code, (65 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_ld(code, f, b, d) s390_emit32 (code, (104 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_le(code, f, b, d) s390_emit32 (code, (120 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_std(code, f, b, d) s390_emit32 (code, (96 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_ste(code, f, b, d) s390_emit32 (code, (112 << 24 | (f) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_mvc(c, l, b1, d1, b2, d2) s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ +#define s390_stam(c, r1, r2, b, d) s390_emit32 (code, (155 << 24 | (r1) << 20 | (r2) << 16 \ + | (b) << 12 | ((d) & 0xfff))) +#define s390_lam(c, r1, r2, b, d) s390_emit32 (code, (154 << 24 | (r1) << 20 | (r2) << 16 \ + | (b) << 12 | ((d) & 0xfff))) +#define s390_sth(code, r, x, b, d) s390_emit32 (code, (64 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_stc(code, r, x, b, d) s390_emit32 (code, (66 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_stcm(code, r, m, b, d) s390_emit32 (code, (190 << 24 | (r) << 20 | (m) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_la(code, r, x, b, d) 
s390_emit32 (code, (65 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_larl(code, r, o) do { \ + s390_emit16 (code, (192 << 8 | (r) << 4)); \ + s390_emit32 (code, (o)); \ + } while (0) +#define s390_ld(code, f, x, b, d) s390_emit32 (code, (104 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_le(code, f, x, b, d) s390_emit32 (code, (120 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_std(code, f, x, b, d) s390_emit32 (code, (96 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_ste(code, f, x, b, d) s390_emit32 (code, (112 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_mvc(c, l, b1, d1, b2, d2) do {s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ (b1) << 12 | ((d1) & 0xfff))); \ - s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff))) -#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))); + s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff)));} while (0) +#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))) +#define s390_break(c) s390_emit16 (c, 0) +#define s390_nill(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 7 << 16 | ((v) & 0xffff))) +#define s390_nilh(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 6 << 16 | ((v) & 0xffff))) +#define s390_brc(c, m, d) s390_emit32 (c, (167 << 24 | ((m) & 0xff) << 20 | 4 << 16 | ((d) & 0xffff))) +#define s390_cr(c, r1, r2) s390_emit16 (c, (25 << 8 | (r1) << 4 | (r2))) +#define s390_clr(c, r1, r2) s390_emit16 (c, (21 << 8 | (r1) << 4 | (r2))) +#define s390_c(c, r, x, b, d) s390_emit32 (c, (89 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_cl(c, r, x, b, d) s390_emit32 (c, (85 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_j(c,d) s390_brc(c, S390_CC_UN, d) +#define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) +#define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) +#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) +#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) +#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) +#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) +#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) +#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) +#define s390_jc(c, m, d) s390_brc(c, m, d) +#define s390_jcl(c, m, d) do {s390_emit16 (c, (192 << 8 | (m) << 4 | 4)); \ + s390_emit32 (c, (d));} while(0) +#define s390_slda(c, r, b, d) s390_emit32 (c, (143 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_sldl(c, r, b, d) s390_emit32 (c, (141 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_srda(c, r, b, d) s390_emit32 (c, (142 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_srdl(c, r, b, d) s390_emit32 (c, (140 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_sla(c, r, b, d) s390_emit32 (c, (139 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_sll(c, r, b, d) s390_emit32 (c, (137 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_sra(c, r, b, d) s390_emit32 (c, (138 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define s390_srl(c, r, b, d) s390_emit32 (c, (136 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) +#define 
s390_sqdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 21 << 16 | ((r1) << 4) | (r2))) +#define s390_sqebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 20 << 16 | ((r1) << 4) | (r2))) +#define s390_adbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 26 << 16 | ((r1) << 4) | (r2))) +#define s390_aebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 10 << 16 | ((r1) << 4) | (r2))) +#define s390_adb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (26)); \ + } while (0) +#define s390_sdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 27 << 16 | ((r1) << 4) | (r2))) +#define s390_sdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (27)); \ + } while (0) +#define s390_sebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 11 << 16 | ((r1) << 4) | (r2))) +#define s390_mdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 28 << 16 | ((r1) << 4) | (r2))) +#define s390_meebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 23 << 16 | ((r1) << 4) | (r2))) +#define s390_ldr(c, r1, r2) s390_emit16 (c, (40 << 8 | (r1) << 4 | (r2))) +#define s390_ler(c, r1, r2) s390_emit16 (c, (56 << 8 | (r1) << 4 | (r2))) +#define s390_lzdr(c, r1) s390_emit32 (c, (179 << 24 | 117 << 16 | (r1) << 4)) +#define s390_lzer(c, r1) s390_emit32 (c, (179 << 24 | 116 << 16 | (r1) << 4)) +#define s390_ddbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 29 << 16 | ((r1) << 4) | (r2))) +#define s390_debr(c, r1, r2) s390_emit32 (c, (179 << 24 | 13 << 16 | ((r1) << 4) | (r2))) +#define s390_didbr(c, r1, r2, m, r3) s390_emit32 (c, (179 << 24 | 91 << 16 | ((r3) << 12) | ((m) << 8) | ((r1) << 4) | (r2))) +#define s390_lcdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 19 << 16 | ((r1) << 4) | (r2))) +#define s390_lndbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 17 << 16 | ((r1) << 4) | (r2))) +#define s390_ldebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 4 << 16 | ((r1) << 4) | (r2))) +#define s390_lnebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 1 << 16 | ((r1) << 4) | (r2))) +#define s390_ledbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 68 << 16 | ((r1) << 4) | (r2))) +#define s390_cfdbr(c, r1, m, f2) s390_emit32 (c, (179 << 24 | 153 << 16 | (m) << 8 | (r1) << 4 | (f2))) +#define s390_cdfbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 149 << 16 | (r1) << 4 | (r2))) +#define s390_cefbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 148 << 16 | (r1) << 4 | (r2))) +#define s390_cdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 25 << 16 | (r1) << 4 | (r2))) +#define s390_cebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 9 << 16 | (r1) << 4 | (r2))) +#define s390_cdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (25)); \ + } while (0) +#define s390_tcdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (17)); \ + } while (0) +#define s390_tedb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (16)); \ + } while (0) +#define s390_stfpc(c, b, d) s390_emit32 (c, (178 << 24 | 156 << 16 | \ + (b) << 12 | ((d) & 0xfff))) #endif diff --git a/s390/tramp.c b/s390/tramp.c index 50fa1a4..d1dec5e 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -18,78 +18,9 @@ #define PROLOG_INS 24 /* Size of emitted prolog */ #define CALL_INS 4 /* Size of emitted call */ #define EPILOG_INS 18 /* Size of emitted epilog */ -#define MIN_STACK_SIZE 96 /* Basic size of S/390 stack frame */ -#define 
FLOAT_REGS 2 /* No. float registers for parms */ -#define GENERAL_REGS 5 /* No. general registers for parms */ - -#define ARG_BASE s390_r10 /* Register for addressing arguments*/ -#define STK_BASE s390_r15 /* Register for addressing stack */ -#define STKARG \ - (i*(sizeof(stackval))) /* Displacement of ith argument */ - -#define MINV_POS 96 /* MonoInvocation stack offset */ -#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) -#define OBJ_POS 8 -#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) #define DEBUG(x) -#define MIN_CACHE_LINE 256 - -/*------------------------------------------------------------------*/ -/* Sequence to add an int/long long to parameters to stack_from_data*/ -/*------------------------------------------------------------------*/ -#define ADD_ISTACK_PARM(r, i) \ - if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param += (i); \ - } else { \ - s390_la (p, s390_r4, STK_BASE, \ - sz.stack_size + 96 + stack_param * sizeof(long)); \ - stack_param += (i); \ - } - -/*------------------------------------------------------------------*/ -/* Sequence to add a float/double to parameters to stack_from_data */ -/*------------------------------------------------------------------*/ -#define ADD_RSTACK_PARM(i) \ - if (fpr_param < FLOAT_REGS) { \ - s390_la (p, s390_r4, STK_BASE, \ - float_pos + (fpr_param * sizeof(float) * (i))); \ - fpr_param++; \ - } else { \ - stack_param += (stack_param % (i)); \ - s390_la (p, s390_r4, STK_BASE, \ - sz.stack_size + 96 + stack_param * sizeof(float) * (i)); \ - stack_param += (i); \ - } - -/*------------------------------------------------------------------*/ -/* Sequence to add a structure ptr to parameters to stack_from_data */ -/*------------------------------------------------------------------*/ -#define ADD_TSTACK_PARM \ - if (reg_param < GENERAL_REGS) { \ - s390_l (p, s390_r4, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param++; \ - } else { \ - s390_l (p, s390_r4, STK_BASE, \ - sz.stack_size + 96 + stack_param * sizeof(long)); \ - stack_param++; \ - } - -#define ADD_PSTACK_PARM(r, i) \ - if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param += (i); \ - } else { \ - s390_l (p, s390_r4, STK_BASE, \ - sz.stack_size + 96 + stack_param * sizeof(long)); \ - stack_param++; \ - } - /*========================= End of Defines =========================*/ /*------------------------------------------------------------------*/ @@ -185,7 +116,7 @@ calculate_sizes (MonoMethodSignature *sig, size_data *sz, fr = 0; gr = 2; sz->retStruct = 0; - sz->stack_size = MIN_STACK_SIZE; + sz->stack_size = S390_MINIMAL_STACK_SIZE; sz->code_size = (PROLOG_INS + CALL_INS + EPILOG_INS); sz->local_size = 0; @@ -385,10 +316,10 @@ emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz) /* function prolog */ s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24); - s390_l (p, s390_r7, STK_BASE, 96); + s390_l (p, s390_r7, 0, STK_BASE, MINV_POS); s390_lr (p, s390_r11, STK_BASE); s390_ahi (p, STK_BASE, -stack_size); - s390_st (p, s390_r11, STK_BASE, 0); + s390_st (p, s390_r11, 0, STK_BASE, 0); /*-----------------------------------------*/ /* Save: */ @@ -436,7 +367,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) gr = 0; fr = 0; act_strs = 0; - stack_par_pos = MIN_STACK_SIZE; + stack_par_pos = S390_MINIMAL_STACK_SIZE; 
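The ADD_*STACK_PARM macros hoisted into the header above and the emit_save_parameters code around this point all make the same placement decision: the first GENERAL_REGS (5) word-sized integer arguments travel in %r2..%r6, and everything after that is stored to the outgoing stack area. A sketch of just that rule; the helper name is mine, not part of the patch:

#include <stdio.h>

static int s390_int_arg_reg (int gr)   /* gr = integer args already placed */
{
    return gr < 5 ? 2 + gr : -1;       /* -1 means: spills to the stack */
}

int main (void)
{
    int gr;
    for (gr = 0; gr < 7; gr++)         /* prints regs 2,3,4,5,6 then -1,-1 */
        printf ("int arg %d -> reg %d\n", gr, s390_int_arg_reg (gr));
    return 0;
}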
local_pos = sz->stack_size; if (sig->hasthis) { @@ -449,11 +380,11 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref)); if (sig->params [i]->byref) { if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); gr ++; } else { - s390_l (p, s390_r0, ARG_BASE, STKARG); - s390_st (p, s390_r0, STK_BASE, stack_par_pos); + s390_l (p, s390_r0, 0, ARG_BASE, STKARG); + s390_st (p, s390_r0, 0, STK_BASE, stack_par_pos); stack_par_pos += sizeof(long); } continue; @@ -477,11 +408,11 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) case MONO_TYPE_STRING: case MONO_TYPE_SZARRAY: if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); gr ++; } else { - s390_l (p, s390_r0, ARG_BASE, STKARG); - s390_st (p, s390_r0, STK_BASE, stack_par_pos); + s390_l (p, s390_r0, 0, ARG_BASE, STKARG); + s390_st (p, s390_r0, 0, STK_BASE, stack_par_pos); stack_par_pos += sizeof(long); } break; @@ -501,24 +432,24 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) case 2: case 4: if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); - s390_l (p, s390_r2 + gr, s390_r2 + gr, 0); + s390_l (p, s390_r2 + gr, 0,ARG_BASE, STKARG); + s390_l (p, s390_r2 + gr, 0, s390_r2 + gr, 0); gr++; } else { stack_par_pos += (stack_par_pos % align); - s390_l (p, s390_r10, ARG_BASE, STKARG); - s390_l (p, s390_r10, s390_r10, 0); - s390_st (p, s390_r10, STK_BASE, stack_par_pos); + s390_l (p, s390_r10, 0,ARG_BASE, STKARG); + s390_l (p, s390_r10, 0, s390_r10, 0); + s390_st (p, s390_r10, 0, STK_BASE, stack_par_pos); stack_par_pos += sizeof(long); } break; case 8: if (gr < GENERAL_REGS-1) { - s390_l (p, s390_r2 + gr, ARG_BASE, STKARG); + s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); s390_lm (p, s390_r2 + gr, s390_r3 + gr, s390_r2 + gr, 0); } else { stack_par_pos += (stack_par_pos % align); - s390_l (p, s390_r10, ARG_BASE, STKARG); + s390_l (p, s390_r10, 0, ARG_BASE, STKARG); s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0); stack_par_pos += sizeof(long long); } @@ -526,29 +457,29 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) default: if (size <= 256) { local_pos += (local_pos % align); - s390_l (p, s390_r13, ARG_BASE, STKARG); + s390_l (p, s390_r13, 0, ARG_BASE, STKARG); s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0); - s390_la (p, s390_r13, STK_BASE, local_pos); + s390_la (p, s390_r13, 0, STK_BASE, local_pos); local_pos += size; } else { local_pos += (local_pos % align); s390_bras (p, s390_r13, 4); s390_word (p, size); - s390_l (p, s390_r1, s390_r13, 0); - s390_l (p, s390_r0, ARG_BASE, STKARG); + s390_l (p, s390_r1, 0, s390_r13, 0); + s390_l (p, s390_r0, 0, ARG_BASE, STKARG); s390_lr (p, s390_r14, s390_r12); - s390_la (p, s390_r12, STK_BASE, local_pos); + s390_la (p, s390_r12, 0, STK_BASE, local_pos); s390_lr (p, s390_r13, s390_r1); s390_mvcl (p, s390_r12, s390_r0); s390_lr (p, s390_r12, s390_r14); - s390_la (p, s390_r13, STK_BASE, local_pos); + s390_la (p, s390_r13, 0, STK_BASE, local_pos); local_pos += size; } if (gr < GENERAL_REGS) { s390_lr (p, s390_r2 + gr, s390_r13); gr++; } else { - s390_st (p, s390_r13, STK_BASE, stack_par_pos); + s390_st (p, s390_r13, 0, STK_BASE, stack_par_pos); stack_par_pos += sizeof(long); } } @@ -566,7 +497,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature 
*sig, size_data *sz) break; case MONO_TYPE_R4: if (fr < FLOAT_REGS) { - s390_le (p, s390_r0 + fr, ARG_BASE, STKARG); + s390_le (p, s390_r0 + fr, 0, ARG_BASE, STKARG); fr++; } else { s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG); @@ -575,7 +506,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) break; case MONO_TYPE_R8: if (fr < FLOAT_REGS) { - s390_ld (p, s390_r0 + fr, ARG_BASE, STKARG); + s390_ld (p, s390_r0 + fr, 0, ARG_BASE, STKARG); fr++; } else { *(guint32 *) p += 7; @@ -594,7 +525,7 @@ emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) /* then point the result area for the called routine */ /*----------------------------------------------------------*/ if (sz->retStruct) { - s390_l (p, s390_r2, s390_r8, 0); + s390_l (p, s390_r2, 0, s390_r8, 0); } return p; @@ -652,7 +583,7 @@ emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, /* get return value */ if (sig->ret->byref || string_ctor) { - s390_st (p, s390_r2, s390_r8, 0); + s390_st (p, s390_r2, 0, s390_r8, 0); } else { simpletype = sig->ret->type; enum_retvalue: @@ -660,12 +591,12 @@ enum_retvalue: case MONO_TYPE_BOOLEAN: case MONO_TYPE_I1: case MONO_TYPE_U1: - s390_stc (p, s390_r2, s390_r8, 0); + s390_stc (p, s390_r2, 0, s390_r8, 0); break; case MONO_TYPE_I2: case MONO_TYPE_U2: case MONO_TYPE_CHAR: - s390_sth (p, s390_r2, s390_r8, 0); + s390_sth (p, s390_r2, 0, s390_r8, 0); break; case MONO_TYPE_I4: case MONO_TYPE_U4: @@ -676,13 +607,13 @@ enum_retvalue: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: - s390_st (p, s390_r2, s390_r8, 0); + s390_st (p, s390_r2, 0, s390_r8, 0); break; case MONO_TYPE_R4: - s390_ste (p, s390_f0, s390_r8, 0); + s390_ste (p, s390_f0, 0, s390_r8, 0); break; case MONO_TYPE_R8: - s390_std (p, s390_f0, s390_r8, 0); + s390_std (p, s390_f0, 0, s390_r8, 0); break; case MONO_TYPE_I8: s390_stm (p, s390_r2, s390_r3, s390_r8, 0); @@ -701,13 +632,13 @@ printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); case 0: break; case 1: - s390_stc (p, s390_r2, s390_r8, 0); + s390_stc (p, s390_r2, 0, s390_r8, 0); break; case 2: - s390_sth (p, s390_r2, s390_r8, 0); + s390_sth (p, s390_r2, 0, s390_r8, 0); break; case 4: - s390_st (p, s390_r2, s390_r8, 0); + s390_st (p, s390_r2, 0, s390_r8, 0); break; case 8: s390_stm (p, s390_r2, s390_r3, s390_r8, 0); @@ -745,8 +676,8 @@ static inline guint8 * emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) { /* function epilog */ - s390_l (p, STK_BASE, STK_BASE, 0); - s390_l (p, s390_r4, STK_BASE, 56); + s390_l (p, STK_BASE, 0, STK_BASE, 0); + s390_l (p, s390_r4, 0, STK_BASE, 56); s390_lm (p, s390_r6, STK_BASE, STK_BASE, 24); s390_br (p, s390_r4); @@ -819,8 +750,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) /* allocate a MonoInvocation structure (inv) on the stack */ /* allocate an array of stackval on the stack with length = */ /* method->signature->param_count + 1 [call it stack_args] */ -/* set inv->ex, inv->ex_handler,inv->parent to */ -/* NULL */ +/* set inv->ex, inv->ex_handler, inv->parent to NULL */ /* set inv->method to method */ /* if method is an instance method, set inv->obj to the */ /* 'this' argument (the first argument) else set to NULL */ @@ -878,11 +808,11 @@ mono_arch_create_method_pointer (MonoMethod *method) /* prolog */ /*----------------------------------------------------------*/ s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24); - s390_l (p, s390_r7, STK_BASE, 96); + s390_l (p, s390_r7, 
0, STK_BASE, MINV_POS); s390_lr (p, s390_r0, STK_BASE); - s390_ahi (p, STK_BASE, -(sz.stack_size+96)); - s390_st (p, s390_r0, STK_BASE, 0); - s390_la (p, s390_r8, STK_BASE, 4); + s390_ahi (p, STK_BASE, -(sz.stack_size+MINV_POS)); + s390_st (p, s390_r0, 0, STK_BASE, 0); + s390_la (p, s390_r8, 0, STK_BASE, 4); s390_lr (p, s390_r10, s390_r8); s390_lhi (p, s390_r9, sz.stack_size+92); s390_lhi (p, s390_r11, 0); @@ -892,19 +822,19 @@ mono_arch_create_method_pointer (MonoMethod *method) /* Let's fill MonoInvocation - first zero some fields */ /*----------------------------------------------------------*/ s390_lhi (p, s390_r0, 0); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); s390_lhi (p, s390_r0, 1); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap))); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap))); /*----------------------------------------------------------*/ /* set method pointer */ /*----------------------------------------------------------*/ s390_bras (p, s390_r13, 4); s390_word (p, method); - s390_l (p, s390_r0, s390_r13, 0); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); + s390_l (p, s390_r0, 0, s390_r13, 0); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval); @@ -938,21 +868,22 @@ mono_arch_create_method_pointer (MonoMethod *method) } if (this_flag) { - s390_st (p, s390_r2 + reg_save, STK_BASE, + s390_st (p, s390_r2 + reg_save, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); reg_param++; } else { - s390_st (p, s390_r2 + reg_save, STK_BASE, local_pos); + s390_st (p, s390_r2 + reg_save, 0, STK_BASE, local_pos); local_pos += sizeof(int); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + s390_st (p, s390_r0, 0, STK_BASE, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); } s390_stm (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos); local_pos += 4 * sizeof(long); float_pos = local_pos; - s390_std (p, s390_f0, STK_BASE, local_pos); + s390_std (p, s390_f0, 0, STK_BASE, local_pos); local_pos += sizeof(double); - s390_std (p, s390_f2, STK_BASE, local_pos); + s390_std (p, s390_f2, 0, STK_BASE, local_pos); local_pos += sizeof(double); /*----------------------------------------------------------*/ @@ -987,8 +918,8 @@ mono_arch_create_method_pointer (MonoMethod *method) /* set MonoInvocation::stack_args */ /*----------------------------------------------------------*/ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); - s390_la (p, s390_r0, STK_BASE, stackval_arg_pos); - s390_st (p, s390_r0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); + s390_la (p, s390_r0, 0, STK_BASE, stackval_arg_pos); + s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); /*----------------------------------------------------------*/ 
/* add stackval arguments */ @@ -1038,12 +969,12 @@ mono_arch_create_method_pointer (MonoMethod *method) } if (vtbuf [i] >= 0) { - s390_la (p, s390_r3, STK_BASE, vt_cur); - s390_st (p, s390_r3, STK_BASE, stackval_arg_pos); - s390_la (p, s390_r3, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r3, 0, STK_BASE, vt_cur); + s390_st (p, s390_r3, 0, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); vt_cur += vtbuf [i]; } else { - s390_la (p, s390_r3, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); } /*--------------------------------------*/ @@ -1054,12 +985,12 @@ mono_arch_create_method_pointer (MonoMethod *method) s390_word (p, sig->params [i]); s390_word (p, sig->pinvoke); s390_word (p, stackval_from_data); - s390_l (p, s390_r2, s390_r13, 0); + s390_l (p, s390_r2, 0, s390_r13, 0); - s390_l (p, s390_r5, s390_r13, 4); + s390_l (p, s390_r5, 0, s390_r13, 4); - s390_l (p, s390_r9, s390_r13, 8); - s390_basr (p, s390_r14, s390_r9); + s390_l (p, s390_r1, 0, s390_r13, 8); + s390_basr (p, s390_r14, s390_r1); stackval_arg_pos += sizeof(stackval); @@ -1076,13 +1007,13 @@ mono_arch_create_method_pointer (MonoMethod *method) /*----------------------------------------------------------*/ /* Set return area pointer. */ /*----------------------------------------------------------*/ - s390_la (p, s390_r10, STK_BASE, stackval_arg_pos); - s390_st (p, s390_r10, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); + s390_la (p, s390_r10, 0, STK_BASE, stackval_arg_pos); + s390_st (p, s390_r10, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { MonoClass *klass = sig->ret->data.klass; if (!klass->enumtype) { - s390_la (p, s390_r9, s390_r10, sizeof(stackval)); - s390_st (p, s390_r9, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r9, 0, s390_r10, sizeof(stackval)); + s390_st (p, s390_r9, 0,STK_BASE, stackval_arg_pos); stackval_arg_pos += sizeof(stackval); } } @@ -1092,9 +1023,9 @@ mono_arch_create_method_pointer (MonoMethod *method) /*----------------------------------------------------------*/ s390_bras (p, s390_r13, 4); s390_word (p, ves_exec_method); - s390_l (p, s390_r9, s390_r13, 0); - s390_la (p, s390_r2, STK_BASE, MINV_POS); - s390_basr (p, s390_r14, s390_r9); + s390_l (p, s390_r1, 0, s390_r13, 0); + s390_la (p, s390_r2, 0, STK_BASE, MINV_POS); + s390_basr (p, s390_r14, s390_r1); /*----------------------------------------------------------*/ /* move retval from stackval to proper place (r3/r4/...) 
*/ @@ -1102,7 +1033,7 @@ mono_arch_create_method_pointer (MonoMethod *method) DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref)); if (sig->ret->byref) { DEBUG (printf ("ret by ref\n")); - s390_st (p, s390_r2, s390_r10, 0); + s390_st (p, s390_r2, 0, s390_r10, 0); } else { enum_retvalue: DEBUG(printf("Returns: %d\n",sig->ret->type)); @@ -1112,11 +1043,11 @@ DEBUG(printf("Returns: %d\n",sig->ret->type)); case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: s390_lhi (p, s390_r2, 0); - s390_ic (p, s390_r2, s390_r10, 0); + s390_ic (p, s390_r2, 0, s390_r10, 0); break; case MONO_TYPE_I2: case MONO_TYPE_U2: - s390_lh (p, s390_r2, s390_r10, 0); + s390_lh (p, s390_r2, 0,s390_r10, 0); break; case MONO_TYPE_I4: case MONO_TYPE_U4: @@ -1125,20 +1056,18 @@ DEBUG(printf("Returns: %d\n",sig->ret->type)); case MONO_TYPE_OBJECT: case MONO_TYPE_STRING: case MONO_TYPE_CLASS: - s390_l (p, s390_r2, s390_r10, 0); + s390_l (p, s390_r2, 0, s390_r10, 0); break; case MONO_TYPE_I8: s390_lm (p, s390_r2, s390_r3, s390_r10, 0); break; case MONO_TYPE_R4: - s390_le (p, s390_f0, s390_r10, 0); + s390_le (p, s390_f0, 0, s390_r10, 0); break; case MONO_TYPE_R8: - s390_ld (p, s390_f0, s390_r10, 0); + s390_ld (p, s390_f0, 0, s390_r10, 0); break; case MONO_TYPE_VALUETYPE: -DEBUG(printf("Returning Structure %d\n",sig->pinvoke)); -DEBUG(printf("Size: %d (%d)\n",retSize,align)); if (sig->ret->data.klass->enumtype) { simpletype = sig->ret->data.klass->enum_basetype->type; goto enum_retvalue; @@ -1151,9 +1080,8 @@ DEBUG(printf("Size: %d (%d)\n",retSize,align)); s390_word (p, sig->ret); s390_word (p, sig->pinvoke); s390_word (p, stackval_to_data); - s390_l (p, s390_r2, s390_r13, 0); - s390_l (p, s390_r3, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); -DEBUG(printf("====> %08X\n",p)); + s390_l (p, s390_r2, 0, s390_r13, 0); + s390_l (p, s390_r3, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); if (sz.retStruct) { /*------------------------------------------*/ /* Get stackval_to_data to set result area */ @@ -1163,23 +1091,23 @@ DEBUG(printf("====> %08X\n",p)); /*------------------------------------------*/ /* Give stackval_to_data a temp result area */ /*------------------------------------------*/ - s390_la (p, s390_r4, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r4, 0, STK_BASE, stackval_arg_pos); } - s390_l (p, s390_r5, s390_r13, 4); - s390_l (p, s390_r9, s390_r13, 8); - s390_basr (p, s390_r14, s390_r9); + s390_l (p, s390_r5, 0,s390_r13, 4); + s390_l (p, s390_r1, 0, s390_r13, 8); + s390_basr (p, s390_r14, s390_r1); switch (retSize) { case 0: break; case 1: s390_lhi (p, s390_r2, 0); - s390_ic (p, s390_r2, s390_r10, 0); + s390_ic (p, s390_r2, 0, s390_r10, 0); break; case 2: - s390_lh (p, s390_r2, s390_r10, 0); + s390_lh (p, s390_r2, 0, s390_r10, 0); break; case 4: - s390_l (p, s390_r2, s390_r10, 0); + s390_l (p, s390_r2, 0, s390_r10, 0); break; case 8: s390_lm (p, s390_r2, s390_r3, s390_r10, 0); @@ -1200,9 +1128,9 @@ DEBUG(printf("====> %08X\n",p)); /*----------------------------------------------------------*/ /* epilog */ /*----------------------------------------------------------*/ - s390_l (p, STK_BASE, STK_BASE, 0); - s390_l (p, s390_r4, STK_BASE, 56); - s390_lm (p, s390_r6, STK_BASE, STK_BASE, 24); + s390_l (p, STK_BASE, 0, STK_BASE, 0); + s390_l (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET); + s390_lm (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET); s390_br (p, s390_r4); DEBUG (printf ("emited code size: %d\n", p - code_buffer)); -- cgit v1.1 From 
e85ff74df8db9dbeaa2f923b2d4b451fd84dcdc0 Mon Sep 17 00:00:00 2001 From: Bernie Solomon Date: Sat, 8 May 2004 01:03:26 +0000 Subject: 2004-05-07 Bernie Solomon * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32 svn path=/trunk/mono/; revision=26957 --- ChangeLog | 4 ++++ ppc/ppc-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index d95fb3e..82de0e0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-05-07 Bernie Solomon + + * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32 + 2004-04-29 Bernie Solomon * ppc/tramp.c: use sizeof (stackval), fix diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 1fb7ce3..7cb1c0c 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -111,7 +111,7 @@ enum { PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ }; -#define ppc_emit32(c,x) do { *((guint32 *) c) = x; ((guint32 *)c)++;} while (0) +#define ppc_emit32(c,x) do { *((guint32 *) c) = x; c = (char *)(c) + sizeof (guint32);} while (0) #define ppc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) #define ppc_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) -- cgit v1.1 From 47892f7ea09d90ff4385b3f9c3796d5ce80ee76d Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 10 May 2004 14:37:42 +0000 Subject: Fix macros. svn path=/trunk/mono/; revision=27028 --- ppc/ppc-codegen.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 7cb1c0c..a9d7eec 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -113,8 +113,8 @@ enum { #define ppc_emit32(c,x) do { *((guint32 *) c) = x; c = (char *)(c) + sizeof (guint32);} while (0) -#define ppc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) -#define ppc_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) +#define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) +#define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) #define ppc_load(c,D,v) do { \ if (ppc_is_imm16 ((v))) { \ -- cgit v1.1 From cf789b0df2ab67298e712242ca201bd01d38c254 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 21 May 2004 13:04:55 +0000 Subject: More encoding fixes. 
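Most of the encoding fixes in the diff that follows wrap 16-bit immediate fields in (guint16) casts. The standalone sketch below shows the bug class being closed, using the field layout of ppc_addi (the register numbers are illustrative): OR-ing in a sign-extended negative displacement without truncation smears 1-bits across every other field of the instruction word.

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int32_t d = -4;
    uint32_t skeleton = 14u << 26 | 3u << 21 | 1u << 16;  /* addi r3,r1,<imm> */
    printf ("%08x\n", skeleton | (uint32_t) d);  /* fffffffc: opcode field destroyed */
    printf ("%08x\n", skeleton | (uint16_t) d);  /* 3861fffc: correct addi r3,r1,-4 */
    return 0;
}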
svn path=/trunk/mono/; revision=27820 --- ppc/ppc-codegen.h | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index a9d7eec..27c8690 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -4,8 +4,8 @@ for testing do the following: ./test | as -o test.o */ -#ifndef PPC_H -#define PPC_H +#ifndef __MONO_PPC_CODEGEN_H__ +#define __MONO_PPC_CODEGEN_H__ #include #include @@ -111,14 +111,14 @@ enum { PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ }; -#define ppc_emit32(c,x) do { *((guint32 *) c) = x; c = (char *)(c) + sizeof (guint32);} while (0) +#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (char *)(c) + sizeof (guint32);} while (0) #define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) #define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) #define ppc_load(c,D,v) do { \ if (ppc_is_imm16 ((v))) { \ - ppc_li ((c), (D), (v)); \ + ppc_li ((c), (D), (guint16)(v)); \ } else { \ ppc_lis ((c), (D), (guint32)(v) >> 16); \ ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ @@ -128,8 +128,8 @@ enum { #define ppc_break(c) ppc_tw((c),31,0,0) #define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) #define ppc_addis(c,D,A,d) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) -#define ppc_li(c,D,v) ppc_addi (c, D, 0, v); -#define ppc_lis(c,D,v) ppc_addis (c, D, 0, v); +#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v)); +#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v)); #define ppc_lwz(c,D,d,a) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_lhz(c,D,d,a) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_lbz(c,D,d,a) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) @@ -246,9 +246,9 @@ my and Ximian's copyright to this code. ;) #define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI) #define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (0x00000 << 1) | 0 ) -#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | B) +#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B)) #define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (32 << 1) | 0 ) -#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | B) +#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B)) #define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc) #define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0) @@ -530,15 +530,15 @@ my and Ximian's copyright to this code. 
;) #define ppc_rfi(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (50 << 1) | 0) -#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc) #define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0) #define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1) -#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc) #define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0) #define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1) -#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 5) | (ME << 1) | Rc) +#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc) #define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0) #define ppc_rlwnmd(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,1) @@ -560,21 +560,21 @@ my and Ximian's copyright to this code. ;) #define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0) #define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1) -#define ppc_stbu(c,S,A,D) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stbu(c,S,A,D) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(D)) #define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0) #define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0) -#define ppc_stfdu(c,S,A,D) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stfdu(c,S,A,D) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(D)) #define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0) #define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0) -#define ppc_stfsu(c,S,A,D) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | D) +#define ppc_stfsu(c,S,A,D) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(D)) #define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0) #define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0) #define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0) -#define ppc_sthu(c,S,A,D) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | D) +#define ppc_sthu(c,S,A,D) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(D)) #define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0) #define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0) #define ppc_stmw(c,S,A,D) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)D) -- cgit v1.1 From 1ac8bbc10c8f2cff9fe8aef20bee51612aa77f88 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 16 Jun 2004 15:24:15 +0000 Subject: Wed Jun 16 18:11:41 CEST 2004 Paolo Molaro * Makefile.am, *.c, *.h: more API cleanups. 
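Backing up to the rlwimix/rlwinmx/rlwnmx changes a few hunks above, which move the mask-begin field from << 5 to << 6: a quick standalone check of the corrected layout (op 6, S 5, A 5, SH 5, MB 5, ME 5, Rc 1 bits) against a well-known encoding. The register choice is mine:

#include <stdint.h>
#include <stdio.h>

static uint32_t rlwinm (unsigned S, unsigned A, unsigned SH, unsigned MB, unsigned ME)
{
    return 21u << 26 | S << 21 | A << 16 | SH << 11 | MB << 6 | ME << 1;
}

int main (void)
{
    /* rlwinm r3,r4,0,0,31: rotate by 0 under a full mask, i.e. copy r4 to r3. */
    printf ("%08x\n", (unsigned) rlwinm (4, 3, 0, 0, 31));  /* 5483003e */
    return 0;
}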
svn path=/trunk/mono/; revision=29691 --- amd64/tramp.c | 4 ++-- arm/tramp.c | 2 +- hppa/tramp.c | 2 +- ppc/tramp.c | 2 +- s390/tramp.c | 2 +- sparc/tramp.c | 2 +- x86/tramp.c | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/amd64/tramp.c b/amd64/tramp.c index 09f80f2..17183c3 100644 --- a/amd64/tramp.c +++ b/amd64/tramp.c @@ -694,7 +694,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ji->code_size = 1; ji->code_start = method->addr; - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return method->addr; } @@ -1048,7 +1048,7 @@ enum_calc_size: ji->code_size = p - code_buffer; ji->code_start = g_memdup (code_buffer, p - code_buffer); - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return ji->code_start; } diff --git a/arm/tramp.c b/arm/tramp.c index f1f0c7c..b9e04b5 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -684,7 +684,7 @@ void* mono_arch_create_method_pointer (MonoMethod* method) ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff); ji->code_start = (gpointer) code_buff; - mono_jit_info_table_add(mono_root_domain, ji); + mono_jit_info_table_add(mono_get_root_domain (), ji); return code_buff; } diff --git a/hppa/tramp.c b/hppa/tramp.c index 1c09c9a..089abde 100644 --- a/hppa/tramp.c +++ b/hppa/tramp.c @@ -772,7 +772,7 @@ generate: ji->code_size = 4; // does this matter? ji->code_start = descriptor; - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return ji->code_start; } diff --git a/ppc/tramp.c b/ppc/tramp.c index f6c037d..f7e8557 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -889,7 +889,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ji->code_size = p - code_buffer; ji->code_start = code_buffer; - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return ji->code_start; } diff --git a/s390/tramp.c b/s390/tramp.c index d1dec5e..866fe5a 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -1142,7 +1142,7 @@ DEBUG(printf("Returns: %d\n",sig->ret->type)); ji->code_size = p - code_buffer; ji->code_start = code_buffer; - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return ji->code_start; } diff --git a/sparc/tramp.c b/sparc/tramp.c index a90fff4..19c0a78 100644 --- a/sparc/tramp.c +++ b/sparc/tramp.c @@ -1070,7 +1070,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ji->code_size = p - code_buffer; ji->code_start = code_buffer; - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); DEBUG(sparc_disassemble_code (code_buffer, p, method->name)); diff --git a/x86/tramp.c b/x86/tramp.c index ee98193..fab5a55 100644 --- a/x86/tramp.c +++ b/x86/tramp.c @@ -539,7 +539,7 @@ mono_arch_create_method_pointer (MonoMethod *method) ji->code_size = p - code_buffer; ji->code_start = g_memdup (code_buffer, p - code_buffer); - mono_jit_info_table_add (mono_root_domain, ji); + mono_jit_info_table_add (mono_get_root_domain (), ji); return ji->code_start; } -- cgit v1.1 From 4e0bce5ca726ed3d2a33d6cfdc3b41b04fcb91f8 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 17 Jun 2004 16:25:19 +0000 Subject: API cleanup fixes. 
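The "API cleanup fixes" below replace direct method->signature reads with mono_method_signature () calls, the same pattern as the mono_get_root_domain () change above: an exported global or struct field becomes an accessor, leaving the runtime free to relocate the object or compute it on demand without breaking callers. Illustrative shape only; Method, Sig, and the lazy decode are stand-ins and assumptions, not something these patches show:

typedef struct { int param_count; } Sig;
typedef struct { Sig *cached_sig; } Method;

static Sig *decode_signature (Method *m)   /* stand-in for the real decoder */
{
    static Sig s;
    (void) m;
    return &s;
}

Sig *method_signature (Method *m)          /* callers stop touching m->cached_sig */
{
    if (!m->cached_sig)
        m->cached_sig = decode_signature (m);
    return m->cached_sig;
}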
svn path=/trunk/mono/; revision=29787 --- ppc/tramp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index f7e8557..abf5397 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -706,11 +706,11 @@ mono_arch_create_method_pointer (MonoMethod *method) stack_size = 1024; stack_param = 0; - sig = method->signature; + sig = mono_method_signature (method); p = code_buffer = g_malloc (code_size); - DEBUG (printf ("\nDelegate [start emiting] %s\n", method->name)); + DEBUG (printf ("\nDelegate [start emiting] %s\n", mono_method_get_name (method))); /* prolog */ ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ -- cgit v1.1 From d1881ea0cd90053526fa30405f4aeac90e06b485 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 18 Jun 2004 20:03:01 +0000 Subject: Fix broken ABI for stack parameters svn path=/trunk/mono/; revision=29915 --- s390/tramp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/s390/tramp.c b/s390/tramp.c index 866fe5a..1483cfd 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -43,6 +43,7 @@ #include "mono/metadata/tabledefs.h" #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" +#include "mono/metadata/marshal.h" /*========================= End of Includes ========================*/ -- cgit v1.1 From 08a92e1c00c0a0cf3c446257b446939062605260 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 30 Jun 2004 15:04:48 +0000 Subject: 2004-06-30 Zoltan Varga * sparc/sparc-codegen.h: Add SPARC64 support. svn path=/trunk/mono/; revision=30577 --- ChangeLog | 4 ++ sparc/sparc-codegen.h | 118 +++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 107 insertions(+), 15 deletions(-) diff --git a/ChangeLog b/ChangeLog index 82de0e0..b112a47 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-06-30 Zoltan Varga + + * sparc/sparc-codegen.h: Add SPARC64 support. 
+ 2004-05-07 Bernie Solomon * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32 diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index e479879..2e447a4 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -4,7 +4,6 @@ #if SIZEOF_VOID_P == 8 #define SPARCV9 1 #else -#define SPARCV9 0 #endif typedef enum { @@ -176,6 +175,9 @@ typedef enum { sparc_fitos_val = 196, sparc_fitod_val = 200, sparc_fitoq_val = 204, + sparc_fxtos_val = 132, + sparc_fxtod_val = 136, + sparc_fxtoq_val = 140, sparc_fstoi_val = 209, sparc_fdtoi_val = 210, sparc_fqtoi_val = 211, @@ -186,8 +188,11 @@ typedef enum { sparc_fqtos_val = 199, sparc_fqtod_val = 203, sparc_fmovs_val = 1, + sparc_fmovd_val = 2, sparc_fnegs_val = 5, + sparc_fnegd_val = 6, sparc_fabss_val = 9, + sparc_fabsd_val = 10, sparc_fsqrts_val = 41, sparc_fsqrtd_val = 42, sparc_fsqrtq_val = 43, @@ -352,13 +357,15 @@ typedef struct { /* for use in logical ops, use 0 to not set flags */ #define sparc_cc 16 -#define sparc_is_imm13(val) ((gint)val >= (gint)-(1<<12) && (gint)val <= (gint)((1<<12)-1)) -#define sparc_is_imm22(val) ((gint)val >= (gint)-(1<<21) && (gint)val <= (gint)((1<<21)-1)) -#define sparc_is_imm16(val) ((gint)val >= (gint)-(1<<15) && (gint)val <= (gint)((1<<15)-1)) -#define sparc_is_imm19(val) ((gint)val >= (gint)-(1<<18) && (gint)val <= (gint)((1<<18)-1)) +#define sparc_is_imm13(val) ((glong)val >= (glong)-(1<<12) && (glong)val <= (glong)((1<<12)-1)) +#define sparc_is_imm22(val) ((glong)val >= (glong)-(1<<21) && (glong)val <= (glong)((1<<21)-1)) +#define sparc_is_imm16(val) ((glong)val >= (glong)-(1<<15) && (glong)val <= (glong)((1<<15)-1)) +#define sparc_is_imm19(val) ((glong)val >= (glong)-(1<<18) && (glong)val <= (glong)((1<<18)-1)) +#define sparc_is_imm30(val) ((glong)val >= (glong)-(1<<29) && (glong)val <= (glong)((1<<29)-1)) /* disassembly */ #define sparc_inst_op(inst) ((inst) >> 30) +#define sparc_inst_op2(inst) (((inst) >> 22) & 0x7) #define sparc_inst_rd(inst) (((inst) >> 25) & 0x1f) #define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f) #define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f) @@ -566,6 +573,9 @@ typedef struct { #define sparc_ldx(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),11,(dest)) #define sparc_ldx_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),11,(dest)) +#define sparc_ldsw(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),8,(dest)) +#define sparc_ldsw_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),8,(dest)) + #define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest)) #define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest)) @@ -617,6 +627,9 @@ typedef struct { #define sparc_restore(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),61,(dest)) #define sparc_restore_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),61,(dest)) +#define sparc_rett(ins,src,disp) sparc_encode_format3a((ins),2,0,(src),(disp),0x39,0) +#define sparc_rett_imm(ins,src,disp) sparc_encode_format3b((ins),2,(src),(disp),0x39,0) + #define sparc_jmpl(ins,base,disp,dest) sparc_encode_format3a((ins),2,0,(base),(disp),56,(dest)) #define sparc_jmpl_imm(ins,base,disp,dest) sparc_encode_format3b((ins),2,(base),(disp),56,(dest)) @@ -670,6 +683,10 @@ typedef struct { #define sparc_fitod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitod_val, r2, dest ) #define sparc_fitoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitoq_val, r2, dest ) 
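The sparc_is_imm* predicates above were widened from gint to glong casts for SPARC64: with a 32-bit cast, a 64-bit operand whose low word happens to be small is truncated and misclassified as an encodable immediate. A standalone demonstration; the macro names are mine:

#include <stdint.h>
#include <stdio.h>

#define IS_IMM13_32(v) ((int32_t)(v) >= -(1 << 12) && (int32_t)(v) <= (1 << 12) - 1)
#define IS_IMM13_64(v) ((int64_t)(v) >= -(1 << 12) && (int64_t)(v) <= (1 << 12) - 1)

int main (void)
{
    int64_t addr = 0x100000fffLL;   /* low word is 0xfff, but the value is not 13-bit */
    printf ("%d %d\n", IS_IMM13_32 (addr), IS_IMM13_64 (addr));  /* 1 0 */
    return 0;
}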
+#define sparc_fxtos( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtos_val, r2, dest ) +#define sparc_fxtod( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtod_val, r2, dest ) +#define sparc_fxtoq( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtoq_val, r2, dest ) + #define sparc_fstoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoi_val, r2, dest ) #define sparc_fdtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoi_val, r2, dest ) #define sparc_fqtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtoi_val, r2, dest ) @@ -687,6 +704,10 @@ typedef struct { #define sparc_fnegs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegs_val, r2, dest ) #define sparc_fabss( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabss_val, r2, dest ) +#define sparc_fmovd( ins, r2, dest) sparc_fop (ins, 0, sparc_fmovd_val, r2, dest); +#define sparc_fnegd( ins, r2, dest) sparc_fop (ins, 0, sparc_fnegd_val, r2, dest); +#define sparc_fabsd( ins, r2, dest) sparc_fop (ins, 0, sparc_fabsd_val, r2, dest); + #define sparc_fsqrts( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrts_val, r2, dest ) #define sparc_fsqrtd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtd_val, r2, dest ) #define sparc_fsqrtq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtq_val, r2, dest ) @@ -744,6 +765,10 @@ typedef struct { #define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest)) #define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest)) +/* Sparc V9 */ +#define sparc_srax(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),39,(dest)) +#define sparc_srax_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),39,(dest)) + /* alu */ #define sparc_alu_reg(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),op|((setcc) ? 0x10 : 0),(dest)) @@ -808,10 +833,9 @@ typedef struct { #define sparc_ret(ins) sparc_jmpl_imm((ins),sparc_i7,8,sparc_g0) #define sparc_retl(ins) sparc_jmpl_imm((ins),sparc_o7,8,sparc_g0) #define sparc_restore_simple(ins) sparc_restore((ins),sparc_g0,sparc_g0,sparc_g0) +#define sparc_rett_simple(ins) sparc_rett_imm((ins),sparc_i7,8) -#define SPARC_SET_MAX_SIZE 8 - -#define sparc_set(ins,val,reg) \ +#define sparc_set32(ins,val,reg) \ do { \ if ((val) == 0) \ sparc_clr_reg((ins),(reg)); \ @@ -825,18 +849,71 @@ typedef struct { } \ } while (0) +#ifdef SPARCV9 +#define SPARC_SET_MAX_SIZE (6 * 4) +#else +#define SPARC_SET_MAX_SIZE (2 * 4) +#endif + #if SPARCV9 -#define sparc_set_ptr(ins,ptr,reg) \ +#define sparc_set(ins,ptr,reg) \ do { \ - guint32 top_word = ((guint64)ptr) >> 32; \ - guint32 bottom_word = ((guint64)ptr) & 0xffffffff; \ - sparc_set((ins),top_word,sparc_g1); \ - sparc_set((ins),bottom_word,(reg)); \ - sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \ - sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \ + g_assert ((reg) != sparc_g1); \ + gint64 val = (gint64)ptr; \ + guint32 top_word = (val) >> 32; \ + guint32 bottom_word = (val) & 0xffffffff; \ + if (val == 0) \ + sparc_clr_reg ((ins), reg); \ + else if ((val >= -4096) && ((val) <= 4095)) \ + sparc_or_imm((ins),FALSE,sparc_g0,bottom_word,(reg)); \ + else if ((val >= 0) && (val <= 4294967295L)) { \ + sparc_sethi((ins),bottom_word,(reg)); \ + sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \ + } \ + else if ((val >= 0) && (val <= (1L << 44) - 1)) { \ + sparc_sethi ((ins), (val >> 12), (reg)); \ + sparc_or_imm ((ins), FALSE, (reg), (val >> 12) & 0x3ff, (reg)); \ + sparc_sllx_imm ((ins),(reg), 12, (reg)); \ + sparc_or_imm ((ins), FALSE, (reg), (val) & 0xfff, (reg)); \ + } \ + else if (top_word 
== 0xffffffff) { \ + sparc_xnor ((ins), FALSE, sparc_g0, sparc_g0, sparc_g1); \ + sparc_sethi((ins),bottom_word,(reg)); \ + sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \ + sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \ + sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \ + } \ + else { \ + sparc_sethi((ins),top_word,sparc_g1); \ + sparc_sethi((ins),bottom_word,(reg)); \ + sparc_or_imm((ins),FALSE,sparc_g1,top_word&0x3ff,sparc_g1); \ + sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \ + sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \ + sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \ + } \ } while (0) #else +#define sparc_set(ins,val,reg) \ + do { \ + if ((val) == 0) \ + sparc_clr_reg((ins),(reg)); \ + else if (((guint32)(val) & 0x1fff) == 0) \ + sparc_sethi((ins),(guint32)(val),(reg)); \ + else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \ + sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \ + else { \ + sparc_sethi((ins),(guint32)(val),(reg)); \ + sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \ + } \ + } while (0) +#endif + #define sparc_set_ptr(ins,val,reg) sparc_set(ins,val,reg) + +#ifdef SPARCV9 +#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff7fffffff, reg) +#else +#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff, reg) #endif #define sparc_not(ins,reg) sparc_xnor((ins),FALSE,(reg),sparc_g0,(reg)) @@ -845,6 +922,17 @@ typedef struct { #define sparc_mov_reg_reg(ins,src,dest) sparc_or((ins),FALSE,sparc_g0,(src),(dest)) +#ifdef SPARCV9 +#define sparc_sti_imm sparc_stx_imm +#define sparc_ldi_imm sparc_ldx_imm +#define sparc_sti sparc_stx +#define sparc_ldi sparc_ldx +#else +#define sparc_sti_imm sparc_st_imm +#define sparc_ldi_imm sparc_ld_imm +#define sparc_sti sparc_st +#define sparc_ldi sparc_ld +#endif #endif /* __SPARC_CODEGEN_H__ */ -- cgit v1.1 From c9c82671d87761dc9a06b78082402924cf8f540d Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Mon, 12 Jul 2004 12:05:08 +0000 Subject: Add instructions to support enhanced memory-to-memory operations. 
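One note before the s390 diff below: the general case of the SPARC64 sparc_set sequence just above synthesizes a 64-bit constant as two sethi/or pairs merged by sllx/or. A plain-C check that the decomposition is lossless, assuming only the macro's own arithmetic (sethi is modeled as clearing the low 10 bits):

#include <stdint.h>
#include <assert.h>

static uint64_t sethi22 (uint32_t v)   /* what sethi leaves before the "or" */
{
    return v & ~(uint32_t) 0x3ff;
}

int main (void)
{
    uint64_t val = 0xdeadbeefcafebabeULL;          /* forces the general case */
    uint32_t top = val >> 32, bot = (uint32_t) val;
    uint64_t g1  = sethi22 (top) | (top & 0x3ff);  /* sethi + or into %g1  */
    uint64_t reg = sethi22 (bot) | (bot & 0x3ff);  /* sethi + or into dest */
    reg |= g1 << 32;                               /* sllx %g1,32 ; or     */
    assert (reg == val);
    return 0;
}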
svn path=/trunk/mono/; revision=31039 --- s390/s390-codegen.h | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index 9e59033..cea81fb 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -113,6 +113,25 @@ typedef enum { } S390FloatRegister; typedef enum { + s390_a0 = 0, + s390_a1, + s390_a2, + s390_a3, + s390_a4, + s390_a5, + s390_a6, + s390_a7, + s390_a8, + s390_a9, + s390_a10, + s390_a11, + s390_a12, + s390_a13, + s390_a14, + s390_a15, +} S390AccRegister; + +typedef enum { s390_fpc = 256, } S390SpecialRegister; @@ -215,7 +234,9 @@ typedef enum { #define s390_mvc(c, l, b1, d1, b2, d2) do {s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ (b1) << 12 | ((d1) & 0xfff))); \ s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff)));} while (0) -#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))) +#define s390_mvcle(c, r1, r3, d2, b2) s390_emit32 (c, (168 << 24 | (r1) << 20 | \ + (r3) << 16 | (b2) << 12 | \ + ((d2) & 0xfff))) #define s390_break(c) s390_emit16 (c, 0) #define s390_nill(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 7 << 16 | ((v) & 0xffff))) #define s390_nilh(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 6 << 16 | ((v) & 0xffff))) @@ -224,6 +245,7 @@ typedef enum { #define s390_clr(c, r1, r2) s390_emit16 (c, (21 << 8 | (r1) << 4 | (r2))) #define s390_c(c, r, x, b, d) s390_emit32 (c, (89 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) #define s390_cl(c, r, x, b, d) s390_emit32 (c, (85 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) +#define s390_chi(c, r, i) s390_emit32 (c, (167 << 24 | (r) << 20 | 15 << 16 | ((i) & 0xffff))) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) #define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) @@ -278,7 +300,11 @@ typedef enum { #define s390_ldebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 4 << 16 | ((r1) << 4) | (r2))) #define s390_lnebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 1 << 16 | ((r1) << 4) | (r2))) #define s390_ledbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 68 << 16 | ((r1) << 4) | (r2))) -#define s390_cfdbr(c, r1, m, f2) s390_emit32 (c, (179 << 24 | 153 << 16 | (m) << 8 | (r1) << 4 | (f2))) +#define s390_ldeb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ + (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ + s390_emit16 (c, (4)); \ + } while (0) +#define s390_cfdbr(c, r1, m, f2) s390_emit32 (c, (179 << 24 | 153 << 16 | (m) << 12 | (r1) << 4 | (f2))) #define s390_cdfbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 149 << 16 | (r1) << 4 | (r2))) #define s390_cefbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 148 << 16 | (r1) << 4 | (r2))) #define s390_cdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 25 << 16 | (r1) << 4 | (r2))) -- cgit v1.1 From f69c71790b01b62dd17d4479db005c3ef68e5e38 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Mon, 12 Jul 2004 23:03:57 +0000 Subject: Add mvcl instruction svn path=/trunk/mono/; revision=31055 --- s390/s390-codegen.h | 1 + 1 file changed, 1 insertion(+) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index cea81fb..00bf06b 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -234,6 +234,7 @@ typedef enum { #define s390_mvc(c, l, b1, d1, b2, d2) do {s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ (b1) << 12 | ((d1) & 0xfff))); \ s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff)));} while (0) +#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))) 
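Why mvcl comes back alongside mvcle: s390_mvc above stores length-minus-one in an 8-bit field, so a single mvc moves at most 256 bytes (and a length of 0 wraps to 256); longer copies need the register-pair forms, which is exactly the size <= 256 split the trampoline code made earlier. A quick standalone check; the helper name is mine:

#include <stdio.h>

static unsigned mvc_len_field (unsigned l)   /* the ((l)-1) << 16 & 0x00ff0000 above */
{
    return ((l - 1) << 16) & 0x00ff0000;
}

int main (void)
{
    printf ("%06x %06x %06x\n",
            mvc_len_field (1), mvc_len_field (256), mvc_len_field (0));
    /* 000000 ff0000 ff0000 -- l == 0 is indistinguishable from l == 256 */
    return 0;
}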
#define s390_mvcle(c, r1, r3, d2, b2) s390_emit32 (c, (168 << 24 | (r1) << 20 | \ (r3) << 16 | (b2) << 12 | \ ((d2) & 0xfff)))
-- cgit v1.1
From c7d11ced2179a38a406489b57f4a2f317fbe5da3 Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Fri, 23 Jul 2004 16:07:08 +0000
Subject: 2004-07-23 zovarga

* amd64/amd64-codegen.h: Ongoing JIT work.

svn path=/trunk/mono/; revision=31416
---
 ChangeLog | 4 +
 amd64/amd64-codegen.h | 474 +++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 452 insertions(+), 26 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index b112a47..3156077 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2004-07-23 zovarga
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
 2004-06-30 Zoltan Varga
 
 * sparc/sparc-codegen.h: Add SPARC64 support.
 
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index 68bcfec..55c1172 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -1,5 +1,5 @@
 /*
- * amd64-codegen.h: Macros for generating x86 code
+ * amd64-codegen.h: Macros for generating amd64 code
 * * Authors: * Paolo Molaro (lupus@ximian.com)
@@ -9,8 +9,6 @@
 * Patrik Torstensson * Zalman Stern * - * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal. * - * Copyright (C) 2000 Intel Corporation. All rights reserved. * Copyright (C) 2001, 2002 Ximian, Inc. */
@@ -33,7 +31,7 @@ typedef enum {
 AMD64_R11 = 11, AMD64_R12 = 12, AMD64_R13 = 13,
- AMD64R_14 = 14,
+ AMD64_R14 = 14,
 AMD64_R15 = 15, AMD64_NREG } AMD64_Reg_No;
@@ -65,6 +63,15 @@ typedef enum
 AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ } AMD64_REX_Bits;
+#define AMD64_CALLEE_REGS ((1<
+#define amd64_is_imm32(val) ((glong)val >= -((glong)1<<31) && (glong)val <= (((glong)1<<31)-1))
+
 #define x86_imm_emit64(inst,imm) \ do { \ amd64_imm_buf imb; imb.val = (long) (imm); \
@@ -157,34 +166,38 @@ typedef union {
 *(inst)++ = imb.b [7]; \ } while (0)
-#define amd64_alu_reg_imm(inst,opc,reg,imm) \
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
 do { \ if ((reg) == X86_EAX) { \
- amd64_emit_rex(inst, 8, 0, 0, 0); \
+ amd64_emit_rex(inst, size, 0, 0, 0); \
 *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
- x86_imm_emit64 ((inst), (imm)); \
+ x86_imm_emit32 ((inst), (imm)); \
 break; \ } \ if (x86_is_imm8((imm))) { \
- amd64_emit_rex(inst, 8, 0, 0, (reg)); \
+ amd64_emit_rex(inst, size, 0, 0, (reg)); \
 *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ } else { \
- amd64_emit_rex(inst, 8, 0, 0, (reg)); \
+ amd64_emit_rex(inst, size, 0, 0, (reg)); \
 *(inst)++ = (unsigned char)0x81; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0)
-#define amd64_alu_reg_reg(inst,opc,dreg,reg) \
+#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
+
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
 do { \
- amd64_emit_rex(inst, 8, (dreg), 0, (reg)); \
+ amd64_emit_rex(inst, size, (dreg), 0, (reg)); \
 *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0)
+#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+
 #define amd64_mov_regp_reg(inst,regp,reg,size) \ do { \ if ((size) == 2) \
@@ -263,20 +276,34 @@ typedef union {
 x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0)
+#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ amd64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned
char)0x63; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + /* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of * 32-bit immediate. Pepper with casts to avoid warnings. */ -#define amd64_mov_reg_imm(inst,reg,imm) \ +#define amd64_mov_reg_imm_size(inst,reg,imm,size) \ do { \ - int _amd64_width_temp = ((long)(imm) == (long)(int)(long)(imm)); \ - amd64_emit_rex(inst, _amd64_width_temp ? 8 : 4, 0, 0, (reg)); \ + amd64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ - if (_amd64_width_temp) \ + if ((size) == 8) \ x86_imm_emit64 ((inst), (long)(imm)); \ else \ x86_imm_emit32 ((inst), (int)(long)(imm)); \ } while (0) +#define amd64_mov_reg_imm(inst,reg,imm) \ + do { \ + int _amd64_width_temp = ((long)(imm) == (long)(int)(long)(imm)); \ + amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \ + } while (0) + +#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) + #define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ if ((size) == 2) \ @@ -316,20 +343,20 @@ typedef union { do { \ amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 6, (basereg), (disp)); \ + x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ } while (0) #define amd64_pop_reg(inst,reg) \ do { \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x58 + (reg); \ + *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ } while (0) #define amd64_call_reg(inst,reg) \ do { \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst), 2, (reg)); \ + x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) #define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) @@ -340,7 +367,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit ((inst), (reg), (regp)); \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ } while (0) #define amd64_movsd_regp_reg(inst,regp,reg) \ @@ -349,7 +376,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg), (regp)); \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ } while (0) #define amd64_movss_reg_regp(inst,reg,regp) \ @@ -358,7 +385,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit ((inst), (reg), (regp)); \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ } while (0) #define amd64_movss_regp_reg(inst,regp,reg) \ @@ -367,7 +394,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg), (regp)); \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ } while (0) #define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ @@ -376,7 +403,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) #define amd64_movss_reg_membase(inst,reg,basereg,disp) \ @@ -385,7 +412,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ 
*(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) #define amd64_movsd_membase_reg(inst,reg,basereg,disp) \ @@ -394,7 +421,7 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) #define amd64_movss_membase_reg(inst,reg,basereg,disp) \ @@ -403,7 +430,402 @@ typedef union { amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) +/* The original inc_reg opcode is used as the REX prefix */ +#define amd64_inc_reg_size(inst,reg,size) \ + do { \ + amd64_emit_rex ((inst),(size),0,0,(reg)); \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst),0,(reg) & 0x7); \ + } while (0) + +#define amd64_dec_reg_size(inst,reg,size) \ + do { \ + amd64_emit_rex ((inst),(size),0,0,(reg)); \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst),1,(reg) & 0x7); \ + } while (0) + +#define amd64_padding_size(inst,size) x86_padding((inst),(size)) + +/* Generated from x86-codegen.h */ + +#define amd64_breakpoint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_breakpoint(inst); } while (0) +#define amd64_cld_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_cld(inst); } while (0) +#define amd64_stosb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); } while (0) +#define amd64_stosl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); } while (0) +#define amd64_stosd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); } while (0) +#define amd64_movsb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); } while (0) +#define amd64_movsl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); } while (0) +#define amd64_movsd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); } while (0) +#define amd64_prefix_size(inst,p,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prefix((inst), p); } while (0) +#define amd64_rdtsc_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); } while (0) +#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) +#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); } while (0) +#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) +#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 
4 : (size)); } while (0)
+#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0)
+#define amd64_inc_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); } while (0)
+#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); } while (0)
+//#define amd64_inc_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_dec_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); } while (0)
+#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); } while (0)
+//#define amd64_dec_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_not_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); } while (0)
+#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); } while (0)
+#define amd64_not_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_neg_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); } while (0)
+#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); } while (0)
+#define amd64_neg_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_nop_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_nop(inst); } while (0)
+//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0)
+#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); } while (0)
+#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0)
+#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); } while (0)
+#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); } while (0)
+//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); } while (0)
+#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); } while (0)
+#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); } while (0)
+#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); } while (0)
+#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); } while (0)
+#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); } while (0)
+#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); } while (0)
+#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0)
+#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); } while (0)
+#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); } while (0)
+#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0)
+#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); } while (0)
+#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0)
+#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); } while (0)
+#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); } while (0)
+#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); } while (0)
+#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0)
+#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); } while (0)
+#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0)
+#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); } while (0)
+#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); } while (0)
+#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); } while (0)
+#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); } while (0)
+#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0)
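/* A hedged sketch (editorial illustration, not part of the patch) of the
 * *_size convention used throughout this block: the trailing size argument
 * feeds amd64_emit_rex, so 8 yields the REX.W 64-bit form and 4 the plain
 * 32-bit form of the same x86 template. `code' is an assumed guint8 * buffer. */
amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 16, 8);  /* add rax, 16 */
amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 16, 4);  /* add eax, 16 */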
+#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); } while (0) +#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) +#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); } while (0) +#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); } while (0) +#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); } while (0) +#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); } while (0) +#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); } while (0) +#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); } while (0) +#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); } while (0) +#define amd64_clear_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); } while (0) +//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); } while (0) +#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); } while (0) +//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); } while (0) +//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) +#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); } while (0) +#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); } while (0) +#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); } while (0) +#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); } while (0) +#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); } while (0) +#define amd64_cdq_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); } while (0) +#define amd64_wait_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); } while (0) +#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); } while (0) +#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); } while (0) +#define amd64_fp_op_size(inst,opc,index,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op((inst),(opc),(index)); } while (0) +#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); } while (0) +#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); } while (0) +#define 
amd64_fstp_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); } while (0) +#define amd64_fcompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); } while (0) +#define amd64_fucompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); } while (0) +#define amd64_fnstsw_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); } while (0) +#define amd64_fnstcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); } while (0) +#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fldcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); } while (0) +#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fchs_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); } while (0) +#define amd64_frem_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); } while (0) +#define amd64_fxch_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); } while (0) +#define amd64_fcomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); } while (0) +#define amd64_fcomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); } while (0) +#define amd64_fucomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); } while (0) +#define amd64_fucomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); } while (0) +#define amd64_fld_size(inst,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); } while (0) +#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) +#define amd64_fld80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); } while (0) +#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fild_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); } while (0) +#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) +#define amd64_fld_reg_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); } while (0) +#define amd64_fldz_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); } while (0) +#define amd64_fld1_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); } while (0) +#define amd64_fldpi_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); } while (0) +#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); } while (0) +#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); } while 
(0) +#define amd64_fst80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fst80_mem((inst),(mem)); } while (0) +#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fist_pop((inst),(mem),(is_long)); } while (0) +#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) +#define amd64_fstsw_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); } while (0) +#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); } while (0) +//#define amd64_push_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); } while (0) +#define amd64_push_regp_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); } while (0) +#define amd64_push_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); } while (0) +//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); } while (0) +#define amd64_push_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_imm((inst),(imm)); } while (0) +//#define amd64_pop_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); } while (0) +#define amd64_pop_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); } while (0) +#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_pushad_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); } while (0) +#define amd64_pushfd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); } while (0) +#define amd64_popad_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); } while (0) +#define amd64_popfd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); } while (0) +#define amd64_loop_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); } while (0) +#define amd64_loope_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); } while (0) +#define amd64_loopne_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); } while (0) +#define amd64_jump32_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); } while (0) +#define amd64_jump8_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); } while (0) +#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) +#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex 
((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
+#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0)
+#define amd64_jump_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_code((inst),(target)); } while (0)
+#define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_disp((inst),(disp)); } while (0)
+#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
+#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
+#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0)
+#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0)
+#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0)
+#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0)
+#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0)
+#define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0)
+//#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0)
+#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_call_membase((inst),((basereg)&0x7),(disp)); } while (0)
+#define amd64_call_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_code((inst),(target)); } while (0)
+//#define amd64_ret_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); } while (0)
+#define amd64_ret_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); } while (0)
+#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); } while (0)
+#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); } while (0)
+#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); } while (0)
+#define amd64_enter_size(inst,framesize,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); } while (0)
+//#define amd64_leave_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); } while (0)
+#define amd64_sahf_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); } while (0)
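/* A hedged reading aid (editorial, not part of the patch): after the
 * remaining *_size wrappers, the unsuffixed names below are thin aliases
 * that pin size to 8, making the 64-bit operand width the default on
 * amd64. `code' is an assumed guint8 * buffer; the two lines emit the
 * same bytes: */
amd64_not_reg (code, AMD64_RCX);          /* default 64-bit form ... */
amd64_not_reg_size (code, AMD64_RCX, 8);  /* ... identical explicit form */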
+#define amd64_fsin_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fsin(inst); } while (0) +#define amd64_fcos_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fcos(inst); } while (0) +#define amd64_fabs_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fabs(inst); } while (0) +#define amd64_ftst_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ftst(inst); } while (0) +#define amd64_fxam_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fxam(inst); } while (0) +#define amd64_fpatan_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fpatan(inst); } while (0) +#define amd64_fprem_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fprem(inst); } while (0) +#define amd64_fprem1_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fprem1(inst); } while (0) +#define amd64_frndint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_frndint(inst); } while (0) +#define amd64_fsqrt_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fsqrt(inst); } while (0) +#define amd64_fptan_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fptan(inst); } while (0) +//#define amd64_padding_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_padding((inst),(size)); } while (0) +#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); } while (0) +#define amd64_epilog_size(inst,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); } while (0) + + + + + + + + + +#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8) +#define amd64_cld(inst) amd64_cld_size(inst,8) +#define amd64_stosb(inst) amd64_stosb_size(inst,8) +#define amd64_stosl(inst) amd64_stosl_size(inst,8) +#define amd64_stosd(inst) amd64_stosd_size(inst,8) +#define amd64_movsb(inst) amd64_movsb_size(inst,8) +#define amd64_movsl(inst) amd64_movsl_size(inst,8) +#define amd64_movsd(inst) amd64_movsd_size(inst,8) +#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8) +#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8) +#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8) +#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8) +#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8) +#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size) +#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size) +#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8) +#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8) +#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8) +#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8) +#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8) +#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8) +#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8) +#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8) +#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8) +#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8) +#define amd64_neg_membase(inst,basereg,disp) 
amd64_neg_membase_size(inst,basereg,disp,8) +#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8) +#define amd64_nop(inst) amd64_nop_size(inst,8) +//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8) +#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) +//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8) +#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) +#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8) +#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) +#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) +#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8) +#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8) +#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8) +#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8) +#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8) +#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) +#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) +#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) +#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) +#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) +#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) +#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) +#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) +#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) +#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) +#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) +#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) +#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) +#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) +#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) +#define amd64_div_mem(inst,mem,is_signed) 
amd64_div_mem_size(inst,mem,is_signed,8) +#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) +#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) +//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) +//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) +//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) +//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) +//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) +#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) +#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) +//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) +#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) +//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) +#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) +#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) +//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) +#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) +#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) +#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) +#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) +#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) +#define amd64_cdq(inst) amd64_cdq_size(inst,8) +#define amd64_wait(inst) amd64_wait_size(inst,8) +#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) +#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) +#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) +#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) +#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) +#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) +#define amd64_fcompp(inst) amd64_fcompp_size(inst,8) +#define amd64_fucompp(inst) amd64_fucompp_size(inst,8) +#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) +#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) +#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) +#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) +#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8) +#define 
amd64_fchs(inst) amd64_fchs_size(inst,8) +#define amd64_frem(inst) amd64_frem_size(inst,8) +#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) +#define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8) +#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) +#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) +#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) +#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) +#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) +#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) +#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) +#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) +#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) +#define amd64_fldz(inst) amd64_fldz_size(inst,8) +#define amd64_fld1(inst) amd64_fld1_size(inst,8) +#define amd64_fldpi(inst) amd64_fldpi_size(inst,8) +#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) +#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) +#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) +#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) +#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) +#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fstsw(inst) amd64_fstsw_size(inst,8) +#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8) +//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) +#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) +#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) +//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) +#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) +#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) +//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) +#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) +#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) +#define amd64_pushad(inst) amd64_pushad_size(inst,8) +#define amd64_pushfd(inst) amd64_pushfd_size(inst,8) +#define amd64_popad(inst) amd64_popad_size(inst,8) +#define amd64_popfd(inst) amd64_popfd_size(inst,8) +#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) +#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) +#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) +#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) +#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) +#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) +#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) +#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) +#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) +#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) +#define amd64_branch8(inst,cond,imm,is_signed) 
amd64_branch8_size(inst,cond,imm,is_signed,8)
+#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8)
+#define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8)
+#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8)
+#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8)
+#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8)
+#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
+#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8)
+//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8)
+#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8)
+#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8)
+#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8)
+//#define amd64_ret(inst) amd64_ret_size(inst,8)
+#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8)
+#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
+#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
+#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
+#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize,8)
+//#define amd64_leave(inst) amd64_leave_size(inst,8)
+#define amd64_sahf(inst) amd64_sahf_size(inst,8)
+#define amd64_fsin(inst) amd64_fsin_size(inst,8)
+#define amd64_fcos(inst) amd64_fcos_size(inst,8)
+#define amd64_fabs(inst) amd64_fabs_size(inst,8)
+#define amd64_ftst(inst) amd64_ftst_size(inst,8)
+#define amd64_fxam(inst) amd64_fxam_size(inst,8)
+#define amd64_fpatan(inst) amd64_fpatan_size(inst,8)
+#define amd64_fprem(inst) amd64_fprem_size(inst,8)
+#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
+#define amd64_frndint(inst) amd64_frndint_size(inst,8)
+#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
+#define amd64_fptan(inst) amd64_fptan_size(inst,8)
+#define amd64_padding(inst,size) amd64_padding_size(inst,size)
+#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
+#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)
+
 #endif // AMD64_H
-- cgit v1.1
From b58d4fba4fad9c9cd52604adf39ffe578e407b14 Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Fri, 23 Jul 2004 20:05:59 +0000
Subject: 2004-07-23 Zoltan Varga

* amd64/amd64-codegen.h: Ongoing JIT work.

svn path=/trunk/mono/; revision=31426
---
 ChangeLog | 4 ++++
 amd64/amd64-codegen.h | 33 ++++++++++++++++++++-------------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 3156077..d06871f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2004-07-23 Zoltan Varga
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
 2004-07-23 zovarga
 
 * amd64/amd64-codegen.h: Ongoing JIT work.
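/* A hedged sketch (editorial illustration, not part of the patch) of the
 * MOVSXD forms: the hunk below adds a register-to-register variant next
 * to the existing membase one. `code' is an assumed guint8 * buffer. */
amd64_movsxd_reg_reg (code, AMD64_RAX, AMD64_RDX);          /* rax = (gint64)(gint32)edx */
amd64_movsxd_reg_membase (code, AMD64_RAX, AMD64_RBP, -8);  /* sign-extend a 32-bit stack slot */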
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 55c1172..814ccc5 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -283,6 +283,13 @@ typedef union { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define amd64_movsxd_reg_reg(inst,dreg,reg) \ + do { \ + amd64_emit_rex(inst,8,(dreg),0,(reg)); \ + *(inst)++ = (unsigned char)0x63; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + /* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of * 32-bit immediate. Pepper with casts to avoid warnings. */ @@ -354,7 +361,7 @@ typedef union { #define amd64_call_reg(inst,reg) \ do { \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ + amd64_emit_rex(inst, 8, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) @@ -415,7 +422,7 @@ typedef union { x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) -#define amd64_movsd_membase_reg(inst,reg,basereg,disp) \ +#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ do { \ *(inst)++ = (unsigned char)0xf2; \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ @@ -522,7 +529,7 @@ typedef union { //#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) //#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) #define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -//#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) //#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); } while (0) //#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); } while (0) #define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); } while (0) @@ -552,7 +559,7 @@ typedef union { #define amd64_fnstcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); } while (0) #define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_fldcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); } while (0) -#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_fchs_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); } while (0) #define amd64_frem_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); } while (0) #define amd64_fxch_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); } while (0) @@ -561,23 +568,23 @@ typedef union { #define amd64_fucomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); } while (0) #define amd64_fucomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); } while (0) #define amd64_fld_size(inst,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); } while (0) -#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) +#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) #define amd64_fld80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); } while (0) #define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_fild_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); } while (0) -#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) +#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) #define amd64_fld_reg_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); } while (0) #define amd64_fldz_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); } while (0) #define amd64_fld1_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); } while (0) #define amd64_fldpi_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); } while (0) -#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); } while (0) -#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); } while (0) -#define amd64_fst80_mem_size(inst,mem,size) do { 
amd64_emit_rex ((inst),(size),0,0,0); x86_fst80_mem((inst),(mem)); } while (0) -#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fist_pop((inst),(mem),(is_long)); } while (0) -#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) +#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); } while (0) +#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); } while (0) +#define amd64_fst80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); } while (0) +#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); } while (0) +#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) #define amd64_fstsw_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); } while (0) -#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); } while (0) +#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); } while (0) //#define amd64_push_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); } while (0) #define amd64_push_regp_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); } while (0) #define amd64_push_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); } while (0) -- cgit v1.1 From a451b99d1a51fe3ffa7334ffbe6865f388e549c0 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 24 Jul 2004 18:29:32 +0000 Subject: 2004-07-24 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. svn path=/trunk/mono/; revision=31431 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 26 +++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index d06871f..39a967f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-07-24 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + 2004-07-23 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. 
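The codegen hunk below adds decode helpers (amd64_modrm_*, amd64_rex_*) next to the emitters. A minimal sketch of how they fit together, assuming the standard 0100WRXB REX prefix layout; the byte values are illustrative, not taken from the patch:

    /* decode "mov %r8, %r11" = 0x4d 0x89 0xc3 */
    unsigned char rex   = 0x4d;   /* 0100 1101: W=1 R=1 X=0 B=1 */
    unsigned char modrm = 0xc3;   /* 11 000 011: mod=3 reg=0 rm=3 */
    int mod = amd64_modrm_mod (modrm);                      /* 3 = register direct */
    int reg = amd64_rex_r (rex) | amd64_modrm_reg (modrm);  /*  8 -> AMD64_R8  */
    int rm  = amd64_rex_b (rex) | amd64_modrm_rm (modrm);   /* 11 -> AMD64_R11 */

REX.R, REX.X and REX.B each supply the fourth register bit that the 3-bit ModRM fields cannot hold, which is also why the emitters mask with & 0x7 before calling the 32-bit x86 byte emitters.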
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 814ccc5..ba1edab 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -151,6 +151,14 @@ typedef union { */ #endif +#define amd64_modrm_mod(modrm) ((modrm) >> 6) +#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) +#define amd64_modrm_rm(modrm) ((modrm) & 0x7) + +#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) +#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) +#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) + #define amd64_is_imm32(val) ((glong)val >= -((glong)1<<31) && (glong)val <= (((glong)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ @@ -431,7 +439,7 @@ typedef union { x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ } while (0) -#define amd64_movss_membase_reg(inst,reg,basereg,disp) \ +#define amd64_movss_membase_reg(inst,basereg,disp,reg) \ do { \ *(inst)++ = (unsigned char)0xf3; \ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ @@ -460,14 +468,14 @@ typedef union { /* Generated from x86-codegen.h */ #define amd64_breakpoint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_breakpoint(inst); } while (0) -#define amd64_cld_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_cld(inst); } while (0) +#define amd64_cld_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); } while (0) #define amd64_stosb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); } while (0) #define amd64_stosl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); } while (0) #define amd64_stosd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); } while (0) #define amd64_movsb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); } while (0) #define amd64_movsl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); } while (0) #define amd64_movsd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); } while (0) -#define amd64_prefix_size(inst,p,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prefix((inst), p); } while (0) +#define amd64_prefix_size(inst,p,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_prefix((inst), p); } while (0) #define amd64_rdtsc_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); } while (0) #define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) #define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); } while (0) @@ -547,11 +555,11 @@ typedef union { #define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); } while (0) #define amd64_cdq_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); } while (0) #define amd64_wait_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); } while (0) -#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); } while (0) -#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); 
x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); } while (0) -#define amd64_fp_op_size(inst,opc,index,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op((inst),(opc),(index)); } while (0) -#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); } while (0) -#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); } while (0) +#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); } while (0) +#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); } while (0) +#define amd64_fp_op_size(inst,opc,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); } while (0) +#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); } while (0) +#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); } while (0) #define amd64_fstp_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); } while (0) #define amd64_fcompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); } while (0) #define amd64_fucompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); } while (0) @@ -618,7 +626,7 @@ typedef union { #define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0) -#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_call_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_call_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_call_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_code((inst),(target)); } while (0) //#define amd64_ret_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); } while (0) #define amd64_ret_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); } while (0) -- cgit v1.1 From 77b5d5d9a5c508cef6a93be733818c446b9fe12c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 28 Jul 2004 20:14:03 +0000 Subject: 2004-07-28 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. svn path=/trunk/mono/; revision=31586 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 21 ++++++++++++--------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index 39a967f..0c6fabf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-07-28 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + 2004-07-24 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. 
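Most of the churn in the hunk below is masking register numbers with & 0x7 before handing them to the legacy x86_* byte emitters: AMD64_R8..AMD64_R15 are numbered 8-15, the ModRM fields are only 3 bits wide, and the fourth bit travels in the REX prefix emitted just before the opcode. A sketch of the intended output, assuming a caller-supplied code buffer (the bytes in the comment are what the macro should now produce):

    unsigned char buf [16], *p = buf;
    /* mov [r9+8], rax -> 0x49 0x89 0x41 0x08             */
    /* REX.WB = 0x49, opcode 0x89, modrm 0x41, disp8 0x08 */
    amd64_mov_membase_reg (p, AMD64_R9, 8, AMD64_RAX, 8);

Without the masking, x86_membase_emit would be handed basereg 9, overflow its 3-bit field and corrupt the ModRM byte.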
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index ba1edab..2b45949 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -229,7 +229,7 @@ typedef union { case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) @@ -269,7 +269,7 @@ typedef union { case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) #define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ @@ -281,14 +281,14 @@ typedef union { case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) #define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_emit_rex(inst,8,(reg),0,(basereg)); \ *(inst)++ = (unsigned char)0x63; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) #define amd64_movsxd_reg_reg(inst,dreg,reg) \ @@ -319,6 +319,8 @@ typedef union { #define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) +#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg)) + #define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ if ((size) == 2) \ @@ -326,15 +328,15 @@ typedef union { amd64_emit_rex(inst, (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) @@ -343,7 +345,7 @@ typedef union { do { \ amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) /* Instruction are implicitly 64-bits so don't generate REX for just the size. 
*/ @@ -463,7 +465,8 @@ typedef union { x86_reg_emit ((inst),1,(reg) & 0x7); \ } while (0) -#define amd64_padding_size(inst,size) x86_padding((inst),(size)) +#define amd64_padding_size(inst,size) \ + do { if (size == 1) x86_padding ((inst),(size)); else { amd64_emit_rex ((inst),8,0,0,0); x86_padding((inst),(size) - 1); } } while (0) /* Generated from x86-codegen.h */ -- cgit v1.1 From 128d13d3973f07f5afba3ac7022bd9a4e7550626 Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Thu, 29 Jul 2004 17:10:53 +0000 Subject: 2004-07-29 Ben Maurer * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm svn path=/trunk/mono/; revision=31622 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index 0c6fabf..1f17615 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-07-29 Ben Maurer + + * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm + 2004-07-28 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index e0b12ff..bc3fd07 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -528,6 +528,13 @@ typedef union { x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) + +#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \ + do { \ + *(inst)++ = (unsigned char)0x80; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) #define x86_alu_mem_reg(inst,opc,mem,reg) \ do { \ -- cgit v1.1 From 57ac232b2805d02a4e2b6322ed9532313337e56c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 30 Jul 2004 16:01:49 +0000 Subject: 2004-07-30 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. svn path=/trunk/mono/; revision=31664 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 24 +++++++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 1f17615..e913f7f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-07-30 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + 2004-07-29 Ben Maurer * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 2b45949..f34fcee 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -33,6 +33,7 @@ typedef enum { AMD64_R13 = 13, AMD64_R14 = 14, AMD64_R15 = 15, + AMD64_RIP = 16, AMD64_NREG } AMD64_Reg_No; @@ -256,7 +257,9 @@ typedef union { case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - x86_mem_emit ((inst), (reg), (mem)); \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ } while (0) #define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ @@ -341,11 +344,20 @@ typedef union { } \ } while (0) +#define amd64_membase_emit(inst,reg,basereg,disp) do { \ + if ((basereg) == AMD64_RIP) { \ + x86_address_byte ((inst), 0, (reg)&0x7, 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + else \ + x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ +} while (0) + #define amd64_lea_membase(inst,reg,basereg,disp) \ do { \ amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) /* Instruction are implicitly 64-bits so don't generate REX for just the size. 
*/ @@ -467,6 +479,12 @@ typedef union { #define amd64_padding_size(inst,size) \ do { if (size == 1) x86_padding ((inst),(size)); else { amd64_emit_rex ((inst),8,0,0,0); x86_padding((inst),(size) - 1); } } while (0) + +#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ + amd64_emit_rex ((inst),0,0,0,(basereg)); \ + *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ + amd64_membase_emit ((inst), 0, (basereg), (disp)); \ +} while (0) /* Generated from x86-codegen.h */ @@ -579,7 +597,7 @@ typedef union { #define amd64_fucomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); } while (0) #define amd64_fucomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); } while (0) #define amd64_fld_size(inst,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); } while (0) -#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) +//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) #define amd64_fld80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); } while (0) #define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_fild_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); } while (0) -- cgit v1.1 From 4e44c97a16962680e5009c97c0022e10ddbbad30 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 30 Jul 2004 18:23:23 +0000 Subject: Optimize code generation macros and standardize svn path=/trunk/mono/; revision=31683 --- s390/s390-codegen.h | 658 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 507 insertions(+), 151 deletions(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index 00bf06b..bb2edbb 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -160,93 +160,472 @@ typedef enum { #define S390_CC_NC 12 #define S390_CC_UN 15 -#define s390_word(addr, value) do {*((guint32 *) addr) = (guint32) (value); \ - ((guint32 *) addr)++;} while (0) -#define s390_float(addr, value) do {*((guint32 *) addr) = (guint32) (value); \ - ((guint32 *) addr)++;} while (0) -#define s390_llong(addr, value) do {*((guint64 *) addr) = (guint64) (value); \ - ((guint64 *) addr)++;} while (0) -#define s390_double(addr, value) do {*((guint64 *) addr) = (guint64) (value); \ - ((guint64 *) addr)++;} while (0) -#define s390_emit16(c, x) do {*((guint16 *) c) = x; ((guint16 *) c)++;} while(0) -#define s390_emit32(c, x) do {*((guint32 *) c) = x; ((guint32 *) c)++;} while(0) -#define s390_basr(code, r1, r2) s390_emit16 (code, (13 << 8 | (r1) << 4 | (r2))) -#define s390_bras(code, r, o) s390_emit32 (code, (167 << 24 | (r) << 20 | 5 << 16 | (o))) -#define s390_brasl(code, r, o) do {s390_emit16 (code, (192 << 8 | (r) << 4 | 5)); \ - s390_emit32 (code, (o));} while(0) -#define s390_ahi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 10 << 16 | ((v) & 0xffff))) -#define s390_alcr(code, r1, r2) s390_emit32 (code, (185 << 24 | 152 << 16 | (r1) << 4 | (r2))) -#define s390_ar(code, r1, r2) s390_emit16 (code, (26 << 8 | (r1) << 4 | (r2))) -#define s390_alr(code, r1, r2) s390_emit16 
(code, (30 << 8 | (r1) << 4 | (r2))) -#define s390_a(code, r, x, b, d) s390_emit32 (code, (90 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_al(code, r, x, b, d) s390_emit32 (code, (94 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_slbr(code, r1, r2) s390_emit32 (code, (185 << 24 | 153 << 16 | (r1) << 4 | (r2))) -#define s390_sr(code, r1, r2) s390_emit16 (code, (27 << 8 | (r1) << 4 | (r2))) -#define s390_slr(code, r1, r2) s390_emit16 (code, (31 << 8 | (r1) << 4 | (r2))) -#define s390_s(code, r, x, b, d) s390_emit32 (code, (91 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_sl(code, r, x, b, d) s390_emit32 (code, (95 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_mr(code, r1, r2) s390_emit16 (code, (28 << 8 | (r1) << 4 | (r2))) -#define s390_m(code, r, x, b, d) s390_emit32 (code, (92 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_msr(code, r1, r2) s390_emit32 (code, (178 << 24 | 82 << 16 | (r1) << 4| (r2))) -#define s390_ms(code, r, x, b, d) s390_emit32 (code, (113 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_mlr(code, r1, r2) s390_emit32 (code, (185 << 24 | 150 << 16 | (r1) << 4| (r2))) -#define s390_dr(code, r1, r2) s390_emit16 (code, (29 << 8 | (r1) << 4 | (r2))) -#define s390_d(code, r, x, b, d) s390_emit32 (code, (93 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_dlr(code, r1, r2) s390_emit32 (code, (185 << 24 | 151 << 16 | (r1) << 4| (r2))) -#define s390_br(code, r) s390_emit16 (code, (7 << 8 | 15 << 4 | (r))) -#define s390_nr(code, r1, r2) s390_emit16 (code, (20 << 8 | (r1) << 4 | (r2))) -#define s390_n(code, r, x, b, d) s390_emit32 (code, (84 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_or(code, r1, r2) s390_emit16 (code, (22 << 8 | (r1) << 4 | (r2))) -#define s390_o(code, r, x, b, d) s390_emit32 (code, (86 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_xr(code, r1, r2) s390_emit16 (code, (23 << 8 | (r1) << 4 | (r2))) -#define s390_x(code, r, x, b, d) s390_emit32 (code, (87 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_lr(code, r1, r2) s390_emit16 (code, (24 << 8 | (r1) << 4 | (r2))) -#define s390_ltr(code, r1, r2) s390_emit16 (code, (18 << 8 | (r1) << 4 | (r2))) -#define s390_l(code, r, x, b, d) s390_emit32 (code, (88 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_lcr(code, r1, r2) s390_emit16 (code, (19 << 8 | (r1) << 4 | (r2))) -#define s390_lnr(code, r1, r2) s390_emit16 (code, (17 << 8 | (r1) << 4 | (r2))) -#define s390_lpr(code, r1, r2) s390_emit16 (code, (16 << 8 | (r1) << 4 | (r2))) -#define s390_lm(code, r1, r2, b, d) s390_emit32 (code, (152 << 24 | (r1) << 20 | (r2) << 16 \ - | (b) << 12 | ((d) & 0xfff))) -#define s390_lh(code, r, x, b, d) s390_emit32 (code, (72 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_lhi(code, r, v) s390_emit32 (code, (167 << 24 | (r) << 20 | 8 << 16 | ((v) & 0xffff))) -#define s390_ic(code, r, x, b, d) s390_emit32 (code, (67 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_icm(code, r, m, b, d) s390_emit32 (code, (191 << 24 | (r) << 20 | (m) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_st(code, r, x, b, d) s390_emit32 (code, (80 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_stm(code, r1, r2, b, d) s390_emit32 (code, (144 << 24 | (r1) << 20 | (r2) << 16 \ - | (b) << 12 | 
((d) & 0xfff))) -#define s390_stam(c, r1, r2, b, d) s390_emit32 (code, (155 << 24 | (r1) << 20 | (r2) << 16 \ - | (b) << 12 | ((d) & 0xfff))) -#define s390_lam(c, r1, r2, b, d) s390_emit32 (code, (154 << 24 | (r1) << 20 | (r2) << 16 \ - | (b) << 12 | ((d) & 0xfff))) -#define s390_sth(code, r, x, b, d) s390_emit32 (code, (64 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_stc(code, r, x, b, d) s390_emit32 (code, (66 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_stcm(code, r, m, b, d) s390_emit32 (code, (190 << 24 | (r) << 20 | (m) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_la(code, r, x, b, d) s390_emit32 (code, (65 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_larl(code, r, o) do { \ - s390_emit16 (code, (192 << 8 | (r) << 4)); \ - s390_emit32 (code, (o)); \ - } while (0) -#define s390_ld(code, f, x, b, d) s390_emit32 (code, (104 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_le(code, f, x, b, d) s390_emit32 (code, (120 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_std(code, f, x, b, d) s390_emit32 (code, (96 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_ste(code, f, x, b, d) s390_emit32 (code, (112 << 24 | (f) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_mvc(c, l, b1, d1, b2, d2) do {s390_emit32 (c, (210 << 24 | ((((l)-1) << 16) & 0x00ff0000) | \ - (b1) << 12 | ((d1) & 0xfff))); \ - s390_emit16 (c, ((b2) << 12 | ((d2) & 0xfff)));} while (0) -#define s390_mvcl(c, r1, r2) s390_emit16 (c, (14 << 8 | (r1) << 4 | (r2))) -#define s390_mvcle(c, r1, r3, d2, b2) s390_emit32 (c, (168 << 24 | (r1) << 20 | \ - (r3) << 16 | (b2) << 12 | \ - ((d2) & 0xfff))) -#define s390_break(c) s390_emit16 (c, 0) -#define s390_nill(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 7 << 16 | ((v) & 0xffff))) -#define s390_nilh(c, r1, v) s390_emit32 (c, (165 << 24 | (r1) << 20 | 6 << 16 | ((v) & 0xffff))) -#define s390_brc(c, m, d) s390_emit32 (c, (167 << 24 | ((m) & 0xff) << 20 | 4 << 16 | ((d) & 0xffff))) -#define s390_cr(c, r1, r2) s390_emit16 (c, (25 << 8 | (r1) << 4 | (r2))) -#define s390_clr(c, r1, r2) s390_emit16 (c, (21 << 8 | (r1) << 4 | (r2))) -#define s390_c(c, r, x, b, d) s390_emit32 (c, (89 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_cl(c, r, x, b, d) s390_emit32 (c, (85 << 24 | (r) << 20 | (x) << 16 | (b) << 12 | ((d) & 0xfff))) -#define s390_chi(c, r, i) s390_emit32 (c, (167 << 24 | (r) << 20 | 15 << 16 | ((i) & 0xffff))) +#define s390_word(addr, value) do \ +{ \ + * (guint32 *) addr = (guint32) value; \ + addr += sizeof(guint32); \ +} while (0) + +#define s390_float(addr, value) do \ +{ \ + * (gfloat *) addr = (gfloat) value; \ + addr += sizeof(gfloat); \ +} while (0) + +#define s390_llong(addr, value) do \ +{ \ + * (guint64 *) addr = (guint64) value; \ + addr += sizeof(guint64); \ +} while (0) + +#define s390_double(addr, value) do \ +{ \ + * (gdouble *) addr = (gdouble) value; \ + addr += sizeof(gdouble); \ +} while (0) + +typedef struct { + short op; +} E_Format; + +typedef struct { + char op; + int im; +} I_Format; + +typedef struct { + char op; + char r1 : 4; + char r2 : 4; +} RR_Format; + +typedef struct { + short op; + char xx; + char r1 : 4; + char r2 : 4; +} RRE_Format; + +typedef struct { + short op; + char r1 : 4; + char xx : 4; + char r3 : 4; + char r2 : 4; +} RRF_Format_1; + +typedef struct { + short op; + char m3 : 4; + char xx : 4; + char r1 : 4; + char r2 : 4; +} 
RRF_Format_2; + +typedef struct { + short op; + char r3 : 4; + char m4 : 4; + char r1 : 4; + char r2 : 4; +} RRF_Format_3; + +typedef struct { + char op; + char r1 : 4; + char x2 : 4; + char b2 : 4; + short d2 : 12; +} RX_Format; + +typedef struct { + char op1; + char r1 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 12; + char xx; + char op2; +} RXE_Format; + +typedef struct { + char op1; + char r3 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 12; + char r1 : 4; + char xx : 4; + char op2; +} RXF_Format; + +typedef struct { + char op1; + char r1 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RXY_Format __attribute__ ((packed)); + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_1; + +typedef struct { + char op; + char r1 : 4; + char m3 : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_2; + +typedef struct { + char op; + char r1 : 4; + char xx : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_3; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RSY_Format_1 __attribute__ ((packed)); + +typedef struct { + char op1; + char r1 : 4; + char m3 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RSY_Format_2 __attribute__ ((packed)); + +typedef struct { + char op1; + char l1 : 4; + char xx : 4; + char b1 : 4; + int d1 : 12; + char yy; + char op2; +} RSL_Format; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + short i2; +} RSI_Format; + +typedef struct { + char op1; + char r1 : 4; + char op2 : 4; + short i2; +} RI_Format; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + short i2; + char xx; + char op2; +} RIE_Format; + +typedef struct { + char op1; + char r1 : 4; + char op2 : 4; + int i2; +} RIL_Format_1 __attribute__ ((packed)); + +typedef struct { + char op1; + char m1 : 4; + char op2 : 4; + int i2; +} RIL_Format_2 __attribute__ ((packed)); + +typedef struct { + char op; + char i2; + char b1 : 4; + short d1 : 12; +} SI_Format; + +typedef struct { + char op1; + char i2; + char b1 : 4; + int d1 : 20; + char op2; +} SIY_Format __attribute__ ((packed)); + +typedef struct { + short op; + char b2 : 4; + short d2 : 12; +} S_Format; + +typedef struct { + char op; + char ll; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_1; + +typedef struct { + char op; + char l1 : 4; + char l2 : 4; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_2; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_3; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b2 : 4; + short d2 : 12; + char b4 : 4; + short d4 : 12; +} SS_Format_4; + +typedef struct { + short op; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SSE_Format __attribute__ ((packed)); + +#define s390_emit16(c, x) do \ +{ \ + *((guint16 *) c) = x; \ + c += sizeof(guint16); \ +} while(0) + +#define s390_emit32(c, x) do \ +{ \ + *((guint32 *) c) = x; \ + c += sizeof(guint32); \ +} while(0) + +#define S390_E(c,opc) s390_emit16(c,opc) + +#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm)) + +#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2)) + +#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2)) + +#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2)) + +#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2)) + +#define 
S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2)) + +#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RXE(c,opc,g1,n2,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RXY(c,opc,g1,n2,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSL(c,opc,ln,s1,p1) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (ln) << 4)); \ + s390_emit32(c, ((s1) << 28 | ((s1 & 0xfff) << 16) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff))) + +#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff))) + +#define S390_RIE(c,opc,g1,g3,m2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ + s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \ +} while (0) + +#define S390_RIL_1(c,opc,g1,m2) do \ +{ \ + s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \ + s390_emit32(c, m2); \ +} while (0) + +#define S390_RIL_2(c,opc,k1,m2) do \ +{ \ + s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \ + s390_emit32(c, m2); \ +} while (0) + +#define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff))); + +#define S390_SIY(c,opc,s1,p1,m2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | m2)); \ + s390_emit32(c, ((s1) << 24 | (((p2) & 0xfffff) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_S(c,opc,s2,p2) s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \ +{ \ + s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \ + (s1) << 12 | ((p1) & 0xfff))); \ + s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ +} while (0) + +#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do \ +{ \ + s390_emit32(c, (opc << 24 | (n1) << 16 | (n2) << 12 | \ + (s1) << 12 | ((p1) & 0xfff))); \ + s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ +} while (0) + +#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do \ +{ \ + s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \ + (s1) << 12 | ((p1) & 0xfff))); \ + s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ +} while (0) + +#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do \ +{ \ + s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \ + (s2) << 12 | ((p2) & 0xfff))); \ + s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff))); \ +} while (0) + +#define S390_SSE(c,opc,s1,p1,s2,p2) do \ +{ \ + s390_emit16(c, opc); \ + s390_emit16(c, ((s1) << 
12 | ((p1) & 0xfff))); \ + s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ +} while (0) + +#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) +#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) +#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) +#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) +#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) +#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) +#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) +#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) +#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) +#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) +#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) +#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) +#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) +#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) +#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) +#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) +#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) +#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) +#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) +#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) +#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) +#define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) +#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) +#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) +#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) +#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) +#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) +#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) +#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) +#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) +#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) +#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) +#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) +#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) +#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) +#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) +#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) +#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) +#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) +#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) +#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) +#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) +#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) +#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) +#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) +#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) +#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) +#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) +#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) +#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) +#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) +#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) +#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) +#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) +#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) +#define s390_break(c) S390_RR(c, 0, 0, 0) +#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) +#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_clr(c, r1, r2) S390_RR(c, 
0x15, r1, r2) +#define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) +#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) +#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) +#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) #define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) @@ -259,70 +638,47 @@ typedef enum { #define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) #define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) #define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) -#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) +#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) #define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) -#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) +#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) #define s390_jc(c, m, d) s390_brc(c, m, d) -#define s390_jcl(c, m, d) do {s390_emit16 (c, (192 << 8 | (m) << 4 | 4)); \ - s390_emit32 (c, (d));} while(0) -#define s390_slda(c, r, b, d) s390_emit32 (c, (143 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_sldl(c, r, b, d) s390_emit32 (c, (141 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_srda(c, r, b, d) s390_emit32 (c, (142 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_srdl(c, r, b, d) s390_emit32 (c, (140 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_sla(c, r, b, d) s390_emit32 (c, (139 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_sll(c, r, b, d) s390_emit32 (c, (137 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_sra(c, r, b, d) s390_emit32 (c, (138 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_srl(c, r, b, d) s390_emit32 (c, (136 << 24 | (r) << 20 | (b) << 12 | ((d) & 0xfff))) -#define s390_sqdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 21 << 16 | ((r1) << 4) | (r2))) -#define s390_sqebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 20 << 16 | ((r1) << 4) | (r2))) -#define s390_adbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 26 << 16 | ((r1) << 4) | (r2))) -#define s390_aebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 10 << 16 | ((r1) << 4) | (r2))) -#define s390_adb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (26)); \ - } while (0) -#define s390_sdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 27 << 16 | ((r1) << 4) | (r2))) -#define s390_sdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (27)); \ - } while (0) -#define s390_sebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 11 << 16 | ((r1) << 4) | (r2))) -#define s390_mdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 28 << 16 | ((r1) << 4) | (r2))) -#define s390_meebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 23 << 16 | ((r1) << 4) | (r2))) -#define s390_ldr(c, r1, r2) s390_emit16 (c, (40 << 8 | (r1) << 4 | (r2))) -#define s390_ler(c, r1, r2) s390_emit16 (c, (56 << 8 | (r1) << 4 | (r2))) -#define s390_lzdr(c, r1) s390_emit32 (c, (179 << 24 | 117 << 16 | (r1) << 4)) -#define s390_lzer(c, r1) s390_emit32 (c, (179 << 24 | 116 << 16 | (r1) << 4)) -#define s390_ddbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 29 << 16 | ((r1) << 4) | (r2))) -#define s390_debr(c, r1, r2) s390_emit32 (c, (179 << 24 | 13 << 16 | ((r1) << 4) | (r2))) -#define s390_didbr(c, r1, r2, m, r3) s390_emit32 (c, (179 << 24 | 91 << 16 | ((r3) << 12) | ((m) << 8) | ((r1) << 4) | (r2))) -#define s390_lcdbr(c, r1, r2) s390_emit32 (c, (179 << 24 
| 19 << 16 | ((r1) << 4) | (r2))) -#define s390_lndbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 17 << 16 | ((r1) << 4) | (r2))) -#define s390_ldebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 4 << 16 | ((r1) << 4) | (r2))) -#define s390_lnebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 1 << 16 | ((r1) << 4) | (r2))) -#define s390_ledbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 68 << 16 | ((r1) << 4) | (r2))) -#define s390_ldeb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (4)); \ - } while (0) -#define s390_cfdbr(c, r1, m, f2) s390_emit32 (c, (179 << 24 | 153 << 16 | (m) << 12 | (r1) << 4 | (f2))) -#define s390_cdfbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 149 << 16 | (r1) << 4 | (r2))) -#define s390_cefbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 148 << 16 | (r1) << 4 | (r2))) -#define s390_cdbr(c, r1, r2) s390_emit32 (c, (179 << 24 | 25 << 16 | (r1) << 4 | (r2))) -#define s390_cebr(c, r1, r2) s390_emit32 (c, (179 << 24 | 9 << 16 | (r1) << 4 | (r2))) -#define s390_cdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (25)); \ - } while (0) -#define s390_tcdb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (17)); \ - } while (0) -#define s390_tedb(c, r, x, b, d) do {s390_emit32 (c, (237 << 24 | (r) << 20 | \ - (x) << 16 | (b) << 12 | ((d) & 0xfff))); \ - s390_emit16 (c, (16)); \ - } while (0) -#define s390_stfpc(c, b, d) s390_emit32 (c, (178 << 24 | 156 << 16 | \ - (b) << 12 | ((d) & 0xfff))) - +#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) +#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) +#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) +#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) +#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) +#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) +#define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) +#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) +#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) +#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) +#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2) +#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) +#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) +#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) +#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) +#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) +#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) +#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) +#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) +#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) +#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) +#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) +#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) +#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) +#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) +#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) +#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) +#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) +#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) +#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) +#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) +#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) +#define s390_cdfbr(c, r1, r2) 
S390_RRE(c, 0xb395, r1, r2) +#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) +#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) +#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) +#define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) +#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) #endif -- cgit v1.1 From 5ebecc33aca9878d2071c8766e5741cd6434d676 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 30 Jul 2004 23:11:29 +0000 Subject: Add some s390 specific tests svn path=/trunk/mono/; revision=31690 --- s390/ChangeLog | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 s390/ChangeLog diff --git a/s390/ChangeLog b/s390/ChangeLog new file mode 100644 index 0000000..6d033e7 --- /dev/null +++ b/s390/ChangeLog @@ -0,0 +1,4 @@ +2004-07-30 Neale Ferguson + + * s390-codegen.h: reworked macros for code generation. + -- cgit v1.1 From 4ad821169050e70979e71bbd5229557570059139 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Wed, 4 Aug 2004 02:54:52 +0000 Subject: S/390 64-bit support tailc processing fix for S/390 32-bit svn path=/trunk/mono/; revision=31840 --- s390/s390-codegen.h | 2 + s390x/ChangeLog | 4 + s390x/Makefile.am | 7 + s390x/s390x-codegen.h | 737 +++++++++++++++++++++++++++++++ s390x/tramp.c | 1148 +++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 1898 insertions(+) create mode 100644 s390x/ChangeLog create mode 100644 s390x/Makefile.am create mode 100644 s390x/s390x-codegen.h create mode 100644 s390x/tramp.c diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index bb2edbb..f785ff0 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -144,8 +144,10 @@ typedef enum { #define STK_BASE s390_r15 #define S390_MINIMAL_STACK_SIZE 96 +#define S390_PARM_SAVE_OFFSET 8 #define S390_REG_SAVE_OFFSET 24 #define S390_RET_ADDR_OFFSET 56 +#define S390_FLOAT_ADDR_OFFSET 64 #define S390_CC_ZR 8 #define S390_CC_NE 7 diff --git a/s390x/ChangeLog b/s390x/ChangeLog new file mode 100644 index 0000000..c65fce5 --- /dev/null +++ b/s390x/ChangeLog @@ -0,0 +1,4 @@ +2004-08-03 Neale Ferguson + + * s390x-codegen.h Makefile.am tramp.c: S/390 64-bit interpreter + diff --git a/s390x/Makefile.am b/s390x/Makefile.am new file mode 100644 index 0000000..e7466d9 --- /dev/null +++ b/s390x/Makefile.am @@ -0,0 +1,7 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-s390x.la + +libmonoarch_s390x_la_SOURCES = tramp.c s390x-codegen.h + diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h new file mode 100644 index 0000000..797d751 --- /dev/null +++ b/s390x/s390x-codegen.h @@ -0,0 +1,737 @@ +/* + Copyright (C) 2001 Radek Doulik +*/ + +#ifndef S390X_H +#define S390X_H +#include +#include + +#define FLOAT_REGS 2 /* No. float registers for parms */ +#define GENERAL_REGS 5 /* No. 
general registers for parms */ + +#define ARG_BASE s390_r10 /* Register for addressing arguments*/ +#define STKARG \ + (i*(sizeof(stackval))) /* Displacement of ith argument */ + +#define MINV_POS 160 /* MonoInvocation stack offset */ +#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) +#define OBJ_POS 8 +#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) + +#define MIN_CACHE_LINE 256 + +/*------------------------------------------------------------------*/ +/* Sequence to add an int/long long to parameters to stack_from_data*/ +/*------------------------------------------------------------------*/ +#define ADD_ISTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a float/double to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_RSTACK_PARM(i) \ + if (fpr_param < FLOAT_REGS) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + float_pos + (fpr_param * sizeof(float) * (i))); \ + fpr_param++; \ + } else { \ + stack_param += (stack_param % (i)); \ + s390_la (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \ + stack_param += (i); \ + } + +/*------------------------------------------------------------------*/ +/* Sequence to add a structure ptr to parameters to stack_from_data */ +/*------------------------------------------------------------------*/ +#define ADD_TSTACK_PARM \ + if (reg_param < GENERAL_REGS) { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param++; \ + } else { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param++; \ + } + +#define ADD_PSTACK_PARM(r, i) \ + if (reg_param < GENERAL_REGS-(r)) { \ + s390_la (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ + reg_param += (i); \ + } else { \ + s390_l (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + stack_param++; \ + } +typedef enum { + s390_r0 = 0, + s390_r1, + s390_r2, + s390_r3, + s390_r4, + s390_r5, + s390_r6, + s390_r7, + s390_r8, + s390_r9, + s390_r10, + s390_r11, + s390_r12, + s390_r13, + s390_r14, + s390_r15, +} S390IntRegister; + +typedef enum { + s390_f0 = 0, + s390_f1, + s390_f2, + s390_f3, + s390_f4, + s390_f5, + s390_f6, + s390_f7, + s390_f8, + s390_f9, + s390_f10, + s390_f11, + s390_f12, + s390_f13, + s390_f14, + s390_f15, +} S390FloatRegister; + +typedef enum { + s390_a0 = 0, + s390_a1, + s390_a2, + s390_a3, + s390_a4, + s390_a5, + s390_a6, + s390_a7, + s390_a8, + s390_a9, + s390_a10, + s390_a11, + s390_a12, + s390_a13, + s390_a14, + s390_a15, +} S390AccRegister; + +typedef enum { + s390_fpc = 256, +} S390SpecialRegister; + +#define s390_is_imm16(val) ((gint)val >= (gint)-(1<<15) && \ + (gint)val <= (gint)((1<<15)-1)) +#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) +#define s390_is_imm12(val) ((gint)val >= (gint)-(1<<11) && \ + (gint)val <= (gint)((1<<15)-1)) +#define s390_is_uimm12(val) ((gint)val >= 0 && (gint)val <= 4095) + +#define STK_BASE s390_r15 +#define S390_MINIMAL_STACK_SIZE 160 +#define S390_REG_SAVE_OFFSET 48 +#define 
S390_PARM_SAVE_OFFSET 16 +#define S390_RET_ADDR_OFFSET 112 +#define S390_FLOAT_SAVE_OFFSET 128 + +#define S390_CC_ZR 8 +#define S390_CC_NE 7 +#define S390_CC_NZ 7 +#define S390_CC_LT 4 +#define S390_CC_GT 2 +#define S390_CC_GE 11 +#define S390_CC_LE 13 +#define S390_CC_OV 1 +#define S390_CC_NO 14 +#define S390_CC_CY 3 +#define S390_CC_NC 12 +#define S390_CC_UN 15 + +#define s390_word(addr, value) do \ +{ \ + * (guint32 *) addr = (guint32) value; \ + addr += sizeof(guint32); \ +} while (0) + +#define s390_float(addr, value) do \ +{ \ + * (gfloat *) addr = (gfloat) value; \ + addr += sizeof(gfloat); \ +} while (0) + +#define s390_llong(addr, value) do \ +{ \ + * (guint64 *) addr = (guint64) value; \ + addr += sizeof(guint64); \ +} while (0) + +#define s390_double(addr, value) do \ +{ \ + * (gdouble *) addr = (gdouble) value; \ + addr += sizeof(gdouble); \ +} while (0) + +typedef struct { + short op; +} E_Format; + +typedef struct { + char op; + int im; +} I_Format; + +typedef struct { + char op; + char r1 : 4; + char r2 : 4; +} RR_Format; + +typedef struct { + short op; + char xx; + char r1 : 4; + char r2 : 4; +} RRE_Format; + +typedef struct { + short op; + char r1 : 4; + char xx : 4; + char r3 : 4; + char r2 : 4; +} RRF_Format_1; + +typedef struct { + short op; + char m3 : 4; + char xx : 4; + char r1 : 4; + char r2 : 4; +} RRF_Format_2; + +typedef struct { + short op; + char r3 : 4; + char m4 : 4; + char r1 : 4; + char r2 : 4; +} RRF_Format_3; + +typedef struct { + char op; + char r1 : 4; + char x2 : 4; + char b2 : 4; + short d2 : 12; +} RX_Format; + +typedef struct { + char op1; + char r1 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 12; + char xx; + char op2; +} RXE_Format; + +typedef struct { + char op1; + char r3 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 12; + char r1 : 4; + char xx : 4; + char op2; +} RXF_Format; + +typedef struct { + char op1; + char r1 : 4; + char x2 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RXY_Format __attribute__ ((packed)); + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_1; + +typedef struct { + char op; + char r1 : 4; + char m3 : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_2; + +typedef struct { + char op; + char r1 : 4; + char xx : 4; + char b2 : 4; + int d2 : 12; +} RS_Format_3; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RSY_Format_1 __attribute__ ((packed)); + +typedef struct { + char op1; + char r1 : 4; + char m3 : 4; + char b2 : 4; + int d2 : 20; + char op2; +} RSY_Format_2 __attribute__ ((packed)); + +typedef struct { + char op1; + char l1 : 4; + char xx : 4; + char b1 : 4; + int d1 : 12; + char yy; + char op2; +} RSL_Format; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + short i2; +} RSI_Format; + +typedef struct { + char op1; + char r1 : 4; + char op2 : 4; + short i2; +} RI_Format; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + short i2; + char xx; + char op2; +} RIE_Format; + +typedef struct { + char op1; + char r1 : 4; + char op2 : 4; + int i2; +} RIL_Format_1 __attribute__ ((packed)); + +typedef struct { + char op1; + char m1 : 4; + char op2 : 4; + int i2; +} RIL_Format_2 __attribute__ ((packed)); + +typedef struct { + char op; + char i2; + char b1 : 4; + short d1 : 12; +} SI_Format; + +typedef struct { + char op1; + char i2; + char b1 : 4; + int d1 : 20; + char op2; +} SIY_Format __attribute__ ((packed)); + +typedef struct { + short op; + char b2 : 4; + short d2 : 12; +} S_Format; + 
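/*
 * A sketch, not part of the original patch: the typedefs around this
 * point mirror the fixed z/Series instruction formats (RR, RX, RI, ...),
 * and the S390_* macros further down pack operands straight into the
 * same layouts. RX, for example, is op(8) r1(4) x2(4) b2(4) d2(12),
 * so "A %r2,96(%r13)" (add from storage) comes out as:
 *
 *     unsigned char buf [4], *p = buf;
 *     s390_a (p, s390_r2, 0, s390_r13, 96);
 *     (packs 0x5a<<24 | 2<<20 | 0<<16 | 13<<12 | 96 = 0x5a20d060)
 *
 * s390_emit32 stores that as one guint32, which matches instruction
 * byte order on the big-endian S/390. The bitfield structs give the
 * decode-side view of the same formats.
 */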
+typedef struct { + char op; + char ll; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_1; + +typedef struct { + char op; + char l1 : 4; + char l2 : 4; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_2; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SS_Format_3; + +typedef struct { + char op; + char r1 : 4; + char r3 : 4; + char b2 : 4; + short d2 : 12; + char b4 : 4; + short d4 : 12; +} SS_Format_4; + +typedef struct { + short op; + char b1 : 4; + short d1 : 12; + char b2 : 4; + short d2 : 12; +} SSE_Format __attribute__ ((packed)); + +#define s390_emit16(c, x) do \ +{ \ + *((guint16 *) c) = x; \ + c += sizeof(guint16); \ +} while(0) + +#define s390_emit32(c, x) do \ +{ \ + *((guint32 *) c) = x; \ + c += sizeof(guint32); \ +} while(0) + +#define S390_E(c,opc) s390_emit16(c,opc) + +#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm)) + +#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2)) + +#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2)) + +#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2)) + +#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2)) + +#define S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2)) + +#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RXE(c,opc,g1,n2,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RXY(c,opc,g1,n2,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ + ((((p2) & 0xff000) >> 12) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff))) + +#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ + ((((p2) & 0xff000) >> 12) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \ + s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ + ((((p2) & 0xff000) >> 12) << 8) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSL(c,opc,ln,s1,p1) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (ln) << 4)); \ + s390_emit32(c, ((s1) << 28 | ((s1 & 0xfff) << 16) | \ + (opc & 0xff))); \ +} while (0) + +#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff))) + +#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff))) + +#define S390_RIE(c,opc,g1,g3,m2) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ + s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \ +} while (0) + +#define S390_RIL_1(c,opc,g1,m2) do \ +{ \ + s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \ + s390_emit32(c, m2); \ +} while (0) + +#define 
S390_RIL_2(c,opc,k1,m2) do \
+{ \
+	s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \
+	s390_emit32(c, m2); \
+} while (0)
+
+#define S390_SI(c,opc,s1,p1,m2)	s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff)))
+
+#define S390_SIY(c,opc,s1,p1,m2) do \
+{ \
+	s390_emit16(c, ((opc & 0xff00) | m2)); \
+	s390_emit32(c, ((s1) << 28 | (((p1) & 0xfff) << 16) | \
+			((((p1) & 0xff000) >> 12) << 8) | \
+			(opc & 0xff))); \
+} while (0)
+
+#define S390_S(c,opc,s2,p2)	s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \
+{ \
+	s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \
+			(s1) << 12 | ((p1) & 0xfff))); \
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do \
+{ \
+	s390_emit32(c, (opc << 24 | (n1) << 20 | (n2) << 16 | \
+			(s1) << 12 | ((p1) & 0xfff))); \
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do \
+{ \
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+			(s1) << 12 | ((p1) & 0xfff))); \
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do \
+{ \
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+			(s2) << 12 | ((p2) & 0xfff))); \
+	s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff))); \
+} while (0)
+
+#define S390_SSE(c,opc,s1,p1,s2,p2) do \
+{ \
+	s390_emit16(c, opc); \
+	s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define s390_basr(c, r1, r2)		S390_RR(c, 0x0d, r1, r2)
+#define s390_bras(c, r, o)		S390_RI(c, 0xa75, r, o)
+#define s390_brasl(c, r, o)		S390_RIL_1(c, 0xc05, r, o)
+#define s390_ahi(c, r, v)		S390_RI(c, 0xa7a, r, v)
+#define s390_aghi(c, r, v)		S390_RI(c, 0xa7b, r, v)
+#define s390_alcr(c, r1, r2)		S390_RRE(c, 0xb998, r1, r2)
+#define s390_alcgr(c, r1, r2)		S390_RRE(c, 0xb988, r1, r2)
+#define s390_ar(c, r1, r2)		S390_RR(c, 0x1a, r1, r2)
+#define s390_agr(c, r1, r2)		S390_RRE(c, 0xb908, r1, r2)
+#define s390_alr(c, r1, r2)		S390_RR(c, 0x1e, r1, r2)
+#define s390_algr(c, r1, r2)		S390_RRE(c, 0xb90a, r1, r2)
+#define s390_a(c, r, x, b, d)		S390_RX(c, 0x5a, r, x, b, d)
+#define s390_ag(c, r, x, b, d)		S390_RXY(c, 0xe308, r, x, b, d)
+#define s390_al(c, r, x, b, d)		S390_RX(c, 0x5e, r, x, b, d)
+#define s390_alg(c, r, x, b, d)		S390_RXY(c, 0xe30a, r, x, b, d)
+#define s390_slbr(c, r1, r2)		S390_RRE(c, 0xb999, r1, r2)
+#define s390_slbgr(c, r1, r2)		S390_RRE(c, 0xb989, r1, r2)
+#define s390_sr(c, r1, r2)		S390_RR(c, 0x1b, r1, r2)
+#define s390_sgr(c, r1, r2)		S390_RRE(c, 0xb909, r1, r2)
+#define s390_slr(c, r1, r2)		S390_RR(c, 0x1f, r1, r2)
+#define s390_slgr(c, r1, r2)		S390_RRE(c, 0xb90b, r1, r2)
+#define s390_s(c, r, x, b, d)		S390_RX(c, 0x5b, r, x, b, d)
+#define s390_sg(c, r, x, b, d)		S390_RXY(c, 0xe309, r, x, b, d)
+#define s390_sl(c, r, x, b, d)		S390_RX(c, 0x5f, r, x, b, d)
+#define s390_slg(c, r, x, b, d)		S390_RXY(c, 0xe30b, r, x, b, d)
+#define s390_mr(c, r1, r2)		S390_RR(c, 0x1c, r1, r2)
+#define s390_m(c, r, x, b, d)		S390_RX(c, 0x5c, r, x, b, d)
+#define s390_msr(c, r1, r2)		S390_RRE(c, 0xb252, r1, r2)
+#define s390_msgr(c, r1, r2)		S390_RRE(c, 0xb90c, r1, r2)
+#define s390_msgfr(c, r1, r2)		S390_RRE(c, 0xb91c, r1, r2)
+#define s390_ms(c, r, x, b, d)		S390_RX(c, 0x71, r, x, b, d)
+#define s390_mlr(c, r1, r2)		S390_RRE(c, 0xb996, r1, r2)
+#define s390_mlgr(c, r1, r2)		S390_RRE(c, 0xb986, r1, r2)
+#define s390_dr(c, r1, r2)		S390_RR(c, 0x1d, r1, r2)
+#define s390_dlr(c, r1, r2)		S390_RRE(c, 
0xb997, r1, r2) +#define s390_dlgr(c, r1, r2) S390_RRE(c, 0xb987, r1, r2) +#define s390_dsgr(c, r1, r2) S390_RRE(c, 0xb90d, r1, r2) +#define s390_dsgfr(c, r1, r2) S390_RRE(c, 0xb91d, r1, r2) +#define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) +#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) +#define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2) +#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) +#define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d) +#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) +#define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2) +#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) +#define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d) +#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) +#define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2) +#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) +#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d) +#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) +#define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) +#define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) +#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) +#define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2) +#define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2) +#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) +#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) +#define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) +#define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d) +#define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d) +#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) +#define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2) +#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) +#define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2) +#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) +#define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2) +#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) +#define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d) +#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) +#define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) +#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) +#define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v) +#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) +#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) +#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) +#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d) +#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) +#define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d) +#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) +#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) +#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) +#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) +#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) +#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) +#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) +#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) +#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) +#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) +#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) +#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) +#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) +#define s390_mvcle(c, r1, r3, d2, b2) 
S390_RS_1(c, 0xa8, r1, r3, d2, b2) +#define s390_break(c) S390_RR(c, 0, 0, 0) +#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) +#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2) +#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) +#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2) +#define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) +#define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d) +#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) +#define s390_clg(c, r, x, b, d) S390_RXY(c, 0xe321, r, x, b, d) +#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) +#define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i) +#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) +#define s390_j(c,d) s390_brc(c, S390_CC_UN, d) +#define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) +#define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) +#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) +#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) +#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) +#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) +#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) +#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) +#define s390_jc(c, m, d) s390_brc(c, m, d) +#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) +#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) +#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) +#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) +#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) +#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) +#define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d) +#define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) +#define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d) +#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) +#define s390_srag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0a, r1, r2, b, d) +#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) +#define s390_srlg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0c, r1, r2, b, d) +#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) +#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2) +#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) +#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) +#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) +#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) +#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) +#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) +#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) +#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) +#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) +#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) +#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) +#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) +#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) +#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) +#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) +#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) +#define s390_lndbr(c, r1, r2) S390_RRE(c, 
0xb311, r1, r2) +#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) +#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) +#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) +#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) +#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) +#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) +#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) +#define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) +#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) +#endif diff --git a/s390x/tramp.c b/s390x/tramp.c new file mode 100644 index 0000000..37b8de5 --- /dev/null +++ b/s390x/tramp.c @@ -0,0 +1,1148 @@ +/*------------------------------------------------------------------*/ +/* */ +/* Name - tramp.c */ +/* */ +/* Function - Create trampolines to invoke arbitrary functions. */ +/* */ +/* Name - Neale Ferguson. */ +/* */ +/* Date - October, 2002 */ +/* */ +/* */ +/*------------------------------------------------------------------*/ + +/*------------------------------------------------------------------*/ +/* D e f i n e s */ +/*------------------------------------------------------------------*/ + +#define PROLOG_INS 24 /* Size of emitted prolog */ +#define CALL_INS 4 /* Size of emitted call */ +#define EPILOG_INS 18 /* Size of emitted epilog */ + +#define DEBUG(x) + +/*========================= End of Defines =========================*/ + +/*------------------------------------------------------------------*/ +/* I n c l u d e s */ +/*------------------------------------------------------------------*/ + +#ifdef NEED_MPROTECT +# include +# include /* for PAGESIZE */ +# ifndef PAGESIZE +# define PAGESIZE 4096 +# endif +#endif + +#include "config.h" +#include +#include +#include "s390x-codegen.h" +#include "mono/metadata/class.h" +#include "mono/metadata/tabledefs.h" +#include "mono/interpreter/interp.h" +#include "mono/metadata/appdomain.h" +#include "mono/metadata/marshal.h" + +/*========================= End of Includes ========================*/ + +/*------------------------------------------------------------------*/ +/* T y p e d e f s */ +/*------------------------------------------------------------------*/ + +/*------------------------------------------------------------------*/ +/* Structure used to accummulate size of stack, code, and locals */ +/*------------------------------------------------------------------*/ +typedef struct { + guint stack_size, + local_size, + code_size, + retStruct; +} size_data; + +/*========================= End of Typedefs ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - add_general */ +/* */ +/* Function - Determine code and stack size incremements for a */ +/* parameter. 
*/ +/* */ +/*------------------------------------------------------------------*/ + +static void inline +add_general (guint *gr, size_data *sz, gboolean simple) +{ + if (simple) { + if (*gr >= GENERAL_REGS) { + sz->stack_size += sizeof(long); + sz->code_size += 12; + } else { + sz->code_size += 8; + } + } else { + if (*gr >= GENERAL_REGS - 1) { + sz->stack_size += 8 + (sz->stack_size % 8); + sz->code_size += 10; + } else { + sz->code_size += 8; + } + (*gr) ++; + } + (*gr) ++; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - calculate_sizes */ +/* */ +/* Function - Determine the amount of space required for code */ +/* and stack. In addition determine starting points */ +/* for stack-based parameters, and area for struct- */ +/* ures being returned on the stack. */ +/* */ +/*------------------------------------------------------------------*/ + +static void inline +calculate_sizes (MonoMethodSignature *sig, size_data *sz, + gboolean string_ctor) +{ + guint i, fr, gr, size; + guint32 simpletype, align; + + fr = 0; + gr = 2; + sz->retStruct = 0; + sz->stack_size = S390_MINIMAL_STACK_SIZE; + sz->code_size = (PROLOG_INS + CALL_INS + EPILOG_INS); + sz->local_size = 0; + + if (sig->hasthis) { + add_general (&gr, sz, TRUE); + } + + /*----------------------------------------------------------*/ + /* We determine the size of the return code/stack in case we*/ + /* need to reserve a register to be used to address a stack */ + /* area that the callee will use. */ + /*----------------------------------------------------------*/ + + if (sig->ret->byref || string_ctor) { + sz->code_size += 8; + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_R4: + case MONO_TYPE_R8: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + sz->code_size += 4; + break; + case MONO_TYPE_I8: + sz->code_size += 4; + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + gr++; + if (sig->pinvoke) + size = mono_class_native_size (sig->ret->data.klass, &align); + else + size = mono_class_value_size (sig->ret->data.klass, &align); + if (align > 1) + sz->code_size += 10; + switch (size) { + /*----------------------------------*/ + /* On S/390, structures of size 1, */ + /* 2, 4, and 8 bytes are returned */ + /* in (a) register(s). */ + /*----------------------------------*/ + case 1: + case 2: + case 4: + case 8: + sz->code_size += 16; + sz->stack_size += 4; + break; + default: + sz->retStruct = 1; + sz->code_size += 32; + } + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", sig->ret->type); + } + } + + /*----------------------------------------------------------*/ + /* We determine the size of the parameter code and stack */ + /* requirements by checking the types and sizes of the */ + /* parameters. 
*/
+	/*----------------------------------------------------------*/
+
+	for (i = 0; i < sig->param_count; ++i) {
+		if (sig->params [i]->byref) {
+			add_general (&gr, sz, TRUE);
+			continue;
+		}
+		simpletype = sig->params [i]->type;
+	enum_calc_size:
+		switch (simpletype) {
+		case MONO_TYPE_BOOLEAN:
+		case MONO_TYPE_CHAR:
+		case MONO_TYPE_I1:
+		case MONO_TYPE_U1:
+		case MONO_TYPE_I2:
+		case MONO_TYPE_U2:
+		case MONO_TYPE_I4:
+		case MONO_TYPE_U4:
+		case MONO_TYPE_I:
+		case MONO_TYPE_U:
+		case MONO_TYPE_PTR:
+		case MONO_TYPE_CLASS:
+		case MONO_TYPE_OBJECT:
+		case MONO_TYPE_STRING:
+			add_general (&gr, sz, TRUE);
+			break;
+		case MONO_TYPE_SZARRAY:
+			add_general (&gr, sz, TRUE);
+			break;
+		case MONO_TYPE_VALUETYPE:
+			if (sig->params [i]->data.klass->enumtype) {
+				simpletype = sig->params [i]->data.klass->enum_basetype->type;
+				goto enum_calc_size;
+			}
+			if (sig->pinvoke)
+				size = mono_class_native_size (sig->params [i]->data.klass, &align);
+			else
+				size = mono_class_value_size (sig->params [i]->data.klass, &align);
+			DEBUG(printf("%d typesize: %d (%d)\n",i,size,align));
+			switch (size) {
+			/*----------------------------------*/
+			/* On S/390, structures of size 1,  */
+			/* 2, 4, and 8 bytes are passed in  */
+			/* (a) register(s).                 */
+			/*----------------------------------*/
+			case 0:
+			case 1:
+			case 2:
+			case 4:
+				add_general(&gr, sz, TRUE);
+				break;
+			case 8:
+				add_general(&gr, sz, FALSE);
+				break;
+			default:
+				sz->local_size += (size + (size % align));
+				sz->code_size  += 40;
+			}
+			break;
+		case MONO_TYPE_I8:
+			add_general (&gr, sz, FALSE);
+			break;
+		case MONO_TYPE_R4:
+			if (fr < FLOAT_REGS) {
+				sz->code_size += 4;
+				fr++;
+			} else {
+				sz->code_size  += 4;
+				sz->stack_size += 8;
+			}
+			break;
+		case MONO_TYPE_R8:
+			if (fr < FLOAT_REGS) {
+				sz->code_size += 4;
+				fr++;
+			} else {
+				sz->code_size  += 4;
+				sz->stack_size += 8 + (sz->stack_size % 8);
+			}
+			break;
+		default:
+			g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+		}
+	}
+
+
+	/* round stack size and local size up to a multiple of 8 */
+	DEBUG (printf ("      stack size: %d (%d)\n"
+		       "       code size: %d\n"
+		       "      local size: %d\n",
+		      (sz->stack_size + 7) & ~7, sz->stack_size,
+		      (sz->code_size), (sz->local_size + 7) & ~7));
+	sz->stack_size = (sz->stack_size + 7) & ~7;
+	sz->local_size = (sz->local_size + 7) & ~7;
+}
+
+/*========================= End of Function ========================*/
+
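+/*
+ * To make the bookkeeping above concrete (a sketch, assuming both
+ * arguments fit in the available GENERAL_REGS/FLOAT_REGS): for a
+ * signature such as
+ *
+ *	long f (int a, double b);
+ *
+ * with no 'this' pointer, the loop charges 8 bytes of code for the
+ * integer argument, 4 for the double, and 4 for storing the 64-bit
+ * return value, all on top of the fixed PROLOG_INS + CALL_INS +
+ * EPILOG_INS allowance; stack_size stays at S390_MINIMAL_STACK_SIZE
+ * and is then rounded up to a multiple of 8 together with local_size.
+ */
+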
+/*------------------------------------------------------------------*/
+/*                                                                  */
+/* Name		- emit_prolog                                        */
+/*                                                                  */
+/* Function	- Create the instructions that implement the stand-  */
+/*		  ard function prolog according to the S/390 ABI.    */
+/*                                                                  */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+	guint stack_size;
+
+	stack_size = sz->stack_size + sz->local_size;
+
+	/* function prolog */
+	s390_stmg(p, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
+	s390_lg  (p, s390_r7, 0, STK_BASE, MINV_POS);
+	s390_lgr (p, s390_r11, STK_BASE);
+	s390_aghi(p, STK_BASE, -stack_size);
+	s390_stg (p, s390_r11, 0, STK_BASE, 0);
+
+	/*-----------------------------------------*/
+	/* Save:                                   */
+	/* - address of "callme"                   */
+	/* - address of "retval"                   */
+	/* - address of "arguments"                */
+	/*-----------------------------------------*/
+	s390_lgr (p, s390_r9, s390_r2);
+	s390_lgr (p, s390_r8, s390_r3);
+	s390_lgr (p, s390_r10, s390_r5);
+
+	return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/*                                                                  */
+/* Name		- emit_save_parameters                               */
+/*                                                                  */
+/* Function	- Create the instructions that load registers with  */
+/*		  parameters, place others on the stack according    */
+/*		  to the S/390 ABI.                                  */
+/*                                                                  */
+/*		  The resulting function takes the form:             */
+/*		  void func (void (*callme)(), void *retval,         */
+/*			     void *this_obj, stackval *arguments);   */
+/*                                                                  */
+/*------------------------------------------------------------------*/
+
+inline static guint8*
+emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+	guint i, fr, gr, act_strs, align,
+	      stack_par_pos, size, local_pos;
+	guint32 simpletype;
+
+	/*----------------------------------------------------------*/
+	/* If a structure on stack is being returned, reserve r2    */
+	/* to point to an area where it can be passed.              */
+	/*----------------------------------------------------------*/
+	if (sz->retStruct)
+		gr = 1;
+	else
+		gr = 0;
+	fr = 0;
+	act_strs = 0;
+	stack_par_pos = S390_MINIMAL_STACK_SIZE;
+	local_pos = sz->stack_size;
+
+	if (sig->hasthis) {
+		/* the 'this' pointer is 64 bits wide, so copy all of it */
+		s390_lgr (p, s390_r2 + gr, s390_r4);
+		gr++;
+	}
+
+	act_strs = 0;
+	for (i = 0; i < sig->param_count; ++i) {
+		DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref));
+		if (sig->params [i]->byref) {
+			if (gr < GENERAL_REGS) {
+				s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+				gr ++;
+			} else {
+				s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+				s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+				stack_par_pos += sizeof(long);
+			}
+			continue;
+		}
+		simpletype = sig->params [i]->type;
+	enum_calc_size:
+		switch (simpletype) {
+		case MONO_TYPE_BOOLEAN:
+		case MONO_TYPE_I1:
+		case MONO_TYPE_U1:
+		case MONO_TYPE_I2:
+		case MONO_TYPE_U2:
+		case MONO_TYPE_CHAR:
+		case MONO_TYPE_I4:
+		case MONO_TYPE_U4:
+		case MONO_TYPE_I:
+		case MONO_TYPE_U:
+		case MONO_TYPE_PTR:
+		case MONO_TYPE_CLASS:
+		case MONO_TYPE_OBJECT:
+		case MONO_TYPE_STRING:
+		case MONO_TYPE_SZARRAY:
+			if (gr < GENERAL_REGS) {
+				s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+				gr ++;
+			} else {
+				s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+				s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+				stack_par_pos += sizeof(long);
+			}
+			break;
+		case MONO_TYPE_VALUETYPE:
+			if (sig->params [i]->data.klass->enumtype) {
+				simpletype = sig->params [i]->data.klass->enum_basetype->type;
+				goto enum_calc_size;
+			}
+			if (sig->pinvoke)
+				size = mono_class_native_size (sig->params [i]->data.klass, &align);
+			else
+				size = mono_class_value_size (sig->params [i]->data.klass, &align);
+			DEBUG(printf("parStruct - size %d pinvoke: %d\n",size,sig->pinvoke));
%d\n",size,sig->pinvoke)); + switch (size) { + case 0: + case 1: + case 2: + case 4: + if (gr < GENERAL_REGS) { + s390_lg (p, s390_r2 + gr, 0,ARG_BASE, STKARG); + s390_lgf(p, s390_r2 + gr, 0, s390_r2 + gr, 0); + gr++; + } else { + stack_par_pos += (stack_par_pos % align); + s390_lg (p, s390_r10, 0,ARG_BASE, STKARG); + s390_lgf(p, s390_r10, 0, s390_r10, 0); + s390_st (p, s390_r10, 0, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + break; + case 8: + if (gr < GENERAL_REGS) { + s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG); + s390_lg (p, s390_r2 + gr, 0, s390_r2 + gr, 0); + } else { + stack_par_pos += (stack_par_pos % align); + s390_lg (p, s390_r10, 0, ARG_BASE, STKARG); + s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0); + stack_par_pos += sizeof(long long); + } + break; + default: + if (size <= 256) { + local_pos += (local_pos % align); + s390_lg (p, s390_r13, 0, ARG_BASE, STKARG); + s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0); + s390_la (p, s390_r13, 0, STK_BASE, local_pos); + local_pos += size; + } else { + local_pos += (local_pos % align); + s390_bras (p, s390_r13, 4); + s390_llong(p, size); + s390_lg (p, s390_r1, 0, s390_r13, 0); + s390_lg (p, s390_r0, 0, ARG_BASE, STKARG); + s390_lgr (p, s390_r14, s390_r12); + s390_la (p, s390_r12, 0, STK_BASE, local_pos); + s390_lgr (p, s390_r13, s390_r1); + s390_mvcl (p, s390_r12, s390_r0); + s390_lgr (p, s390_r12, s390_r14); + s390_la (p, s390_r13, 0, STK_BASE, local_pos); + local_pos += size; + } + if (gr < GENERAL_REGS) { + s390_lgr(p, s390_r2 + gr, s390_r13); + gr++; + } else { + s390_stg(p, s390_r13, 0, STK_BASE, stack_par_pos); + stack_par_pos += sizeof(long); + } + } + break; + case MONO_TYPE_I8: + if (gr < GENERAL_REGS) { + s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG); + gr += 2; + } else { + *(guint32 *) p += 7; + *(guint32 *) p &= ~7; + s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); + } + break; + case MONO_TYPE_R4: + if (fr < FLOAT_REGS) { + s390_le (p, s390_r0 + fr, 0, ARG_BASE, STKARG); + fr++; + } else { + s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(float); + } + break; + case MONO_TYPE_R8: + if (fr < FLOAT_REGS) { + s390_ld (p, s390_r0 + fr, 0, ARG_BASE, STKARG); + fr++; + } else { + *(guint32 *) p += 7; + *(guint32 *) p &= ~7; + s390_mvc (p, sizeof(double), STK_BASE, stack_par_pos, ARG_BASE, STKARG); + stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); + } + break; + default: + g_error ("Can't trampoline 0x%x", sig->params [i]->type); + } + } + + /*----------------------------------------------------------*/ + /* If we're returning a structure but not in a register */ + /* then point the result area for the called routine */ + /*----------------------------------------------------------*/ + if (sz->retStruct) { + s390_lg (p, s390_r2, 0, s390_r8, 0); + } + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - alloc_code_memory */ +/* */ +/* Function - Allocate space to place the emitted code. 
*/ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +alloc_code_memory (guint code_size) +{ + guint8 *p; + +#ifdef NEED_MPROTECT + p = g_malloc (code_size + PAGESIZE - 1); + + /* Align to a multiple of PAGESIZE, assumed to be a power of two */ + p = (char *)(((int) p + PAGESIZE-1) & ~(PAGESIZE-1)); +#else + p = g_malloc (code_size); +#endif + DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4)); + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_call_and_store_retval */ +/* */ +/* Function - Emit code that will implement the call to the */ +/* desired function, and unload the result according */ +/* to the S390 ABI for the type of value returned */ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, + size_data *sz, gboolean string_ctor) +{ + guint32 simpletype; + guint retSize, align; + + /* call "callme" */ + s390_basr (p, s390_r14, s390_r9); + + /* get return value */ + if (sig->ret->byref || string_ctor) { + s390_stg(p, s390_r2, 0, s390_r8, 0); + } else { + simpletype = sig->ret->type; +enum_retvalue: + switch (simpletype) { + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_I1: + case MONO_TYPE_U1: + s390_stc (p, s390_r2, 0, s390_r8, 0); + break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + case MONO_TYPE_CHAR: + s390_sth (p, s390_r2, 0, s390_r8, 0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + case MONO_TYPE_CLASS: + case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: + case MONO_TYPE_ARRAY: + case MONO_TYPE_STRING: + s390_st (p, s390_r2, 0, s390_r8, 0); + break; + case MONO_TYPE_R4: + s390_ste (p, s390_f0, 0, s390_r8, 0); + break; + case MONO_TYPE_R8: + s390_std (p, s390_f0, 0, s390_r8, 0); + break; + case MONO_TYPE_I8: + s390_stg (p, s390_r2, 0, s390_r8, 0); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + if (sig->pinvoke) + retSize = mono_class_native_size (sig->ret->data.klass, &align); + else + retSize = mono_class_value_size (sig->ret->data.klass, &align); +printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); + switch(retSize) { + case 0: + break; + case 1: + s390_stc (p, s390_r2, 0, s390_r8, 0); + break; + case 2: + s390_sth (p, s390_r2, 0, s390_r8, 0); + break; + case 4: + s390_st (p, s390_r2, 0, s390_r8, 0); + break; + case 8: + s390_stg (p, s390_r2, 0, s390_r8, 0); + break; + default: + /*------------------------------------------*/ + /* The callee has already placed the result */ + /* in the required area */ + /*------------------------------------------*/ + } + break; + case MONO_TYPE_VOID: + break; + default: + g_error ("Can't handle as return value 0x%x", + sig->ret->type); + } + } + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - emit_epilog */ +/* */ +/* Function - Create the instructions that implement the stand- */ +/* ard function epilog according to the S/390 ABI. 
*/ +/* */ +/*------------------------------------------------------------------*/ + +static inline guint8 * +emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz) +{ + /* function epilog */ + s390_lg (p, STK_BASE, 0, STK_BASE, 0); + s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET); + s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET); + s390_br (p, s390_r4); + + return p; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - mono_arch_create_trampoline. */ +/* */ +/* Function - Create the code that will allow a mono method to */ +/* invoke a system subroutine. */ +/* */ +/*------------------------------------------------------------------*/ + +MonoPIFunc +mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) +{ + guint8 *p, *code_buffer; + size_data sz; + + DEBUG (printf ("\nPInvoke [start emiting]\n")); + calculate_sizes (sig, &sz, string_ctor); + + p = code_buffer = alloc_code_memory (sz.code_size); + p = emit_prolog (p, sig, &sz); + p = emit_save_parameters (p, sig, &sz); + p = emit_call_and_store_retval (p, sig, &sz, string_ctor); + p = emit_epilog (p, sig, &sz); + +#ifdef NEED_MPROTECT + if (mprotect (code_buffer, 1024, PROT_READ | PROT_WRITE | PROT_EXEC)) { + g_error ("Cannot mprotect trampoline\n"); + } +#endif + + DEBUG (printf ("emited code size: %d\n", p - code_buffer)); + + DEBUG (printf ("PInvoke [end emiting]\n")); + + return (MonoPIFunc) code_buffer; +} + +/*========================= End of Function ========================*/ + +/*------------------------------------------------------------------*/ +/* */ +/* Name - mono_arch_create_method_pointer */ +/* */ +/* Function - Returns a pointer to a native function that can */ +/* be used to call the specified method. */ +/* */ +/* The function created will receive the arguments */ +/* according to the calling convention specified in */ +/* in the method. */ +/* */ +/* This function works by creating a MonoInvocation */ +/* structure, filling the fields in and calling */ +/* ves_exec_method() on it. */ +/* */ +/* Logic: */ +/* ------ */ +/* mono_arch_create_method_pointer (MonoMethod *method) */ +/* create the unmanaged->managed wrapper */ +/* register it with mono_jit_info_table_add() */ +/* */ +/* What does the unmanaged->managed wrapper do? */ +/* allocate a MonoInvocation structure (inv) on the stack */ +/* allocate an array of stackval on the stack with length = */ +/* method->signature->param_count + 1 [call it stack_args] */ +/* set inv->ex, inv->ex_handler, inv->parent to NULL */ +/* set inv->method to method */ +/* if method is an instance method, set inv->obj to the */ +/* 'this' argument (the first argument) else set to NULL */ +/* for each argument to the method call: */ +/* stackval_from_data (sig->params[i], &stack_args[i], */ +/* arg, sig->pinvoke); */ +/* Where: */ +/* ------ */ +/* sig - is method->signature */ +/* &stack_args[i] - is the pointer to the ith element */ +/* in the stackval array */ +/* arg - is a pointer to the argument re- */ +/* ceived by the function according */ +/* to the call convention. If it */ +/* gets passed in a register, save */ +/* on the stack first. 
*/ +/* */ +/* set inv->retval to the address of the last element of */ +/* stack_args [recall we allocated param_count+1 of them] */ +/* call ves_exec_method(inv) */ +/* copy the returned value from inv->retval where the calling */ +/* convention expects to find it on return from the wrap- */ +/* per [if it's a structure, use stackval_to_data] */ +/* */ +/*------------------------------------------------------------------*/ + +void * +mono_arch_create_method_pointer (MonoMethod *method) +{ + MonoMethodSignature *sig; + MonoJitInfo *ji; + guint8 *p, *code_buffer; + guint i, align = 0, simple_type, retSize, reg_save = 0, + stackval_arg_pos, local_pos, float_pos, + local_start, reg_param = 0, stack_param, + this_flag, arg_pos, fpr_param, parSize; + guint32 simpletype; + size_data sz; + int *vtbuf, cpos, vt_cur; + + sz.code_size = 1024; + sz.stack_size = 1024; + stack_param = 0; + fpr_param = 0; + arg_pos = 0; + + sig = method->signature; + + p = code_buffer = g_malloc (sz.code_size); + + DEBUG (printf ("\nDelegate [start emiting] %s at 0x%08x\n", + method->name,p)); + + /*----------------------------------------------------------*/ + /* prolog */ + /*----------------------------------------------------------*/ + s390_stmg(p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET); + s390_lg (p, s390_r7, 0, STK_BASE, MINV_POS); + s390_lgr (p, s390_r0, STK_BASE); + s390_aghi(p, STK_BASE, -(sz.stack_size+MINV_POS)); + s390_stg (p, s390_r0, 0, STK_BASE, 0); + s390_la (p, s390_r8, 0, STK_BASE, 4); + s390_lgr (p, s390_r10, s390_r8); + s390_lghi(p, s390_r9, sz.stack_size+92); + s390_lghi(p, s390_r11, 0); + s390_mvcl(p, s390_r8, s390_r10); + + /*----------------------------------------------------------*/ + /* Let's fill MonoInvocation - first zero some fields */ + /*----------------------------------------------------------*/ + s390_lghi (p, s390_r0, 0); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex))); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler))); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent))); + s390_lghi (p, s390_r0, 1); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap))); + + /*----------------------------------------------------------*/ + /* set method pointer */ + /*----------------------------------------------------------*/ + s390_bras (p, s390_r13, 4); + s390_llong(p, method); + s390_lg (p, s390_r0, 0, s390_r13, 0); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method))); + + local_start = local_pos = MINV_POS + + sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval); + this_flag = (sig->hasthis ? 1 : 0); + + /*----------------------------------------------------------*/ + /* if we are returning a structure, checks it's length to */ + /* see if there's a "hidden" parameter that points to the */ + /* area. 
If necessary save this hidden parameter for later */ + /*----------------------------------------------------------*/ + if (MONO_TYPE_ISSTRUCT(sig->ret)) { + if (sig->pinvoke) + retSize = mono_class_native_size (sig->ret->data.klass, &align); + else + retSize = mono_class_value_size (sig->ret->data.klass, &align); + switch(retSize) { + case 0: + case 1: + case 2: + case 4: + case 8: + sz.retStruct = 0; + break; + default: + sz.retStruct = 1; + s390_lgr(p, s390_r8, s390_r2); + reg_save = 1; + } + } else { + reg_save = 0; + } + + if (this_flag) { + s390_stg (p, s390_r2 + reg_save, 0, STK_BASE, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + reg_param++; + } else { + s390_stg (p, s390_r2 + reg_save, 0, STK_BASE, local_pos); + local_pos += sizeof(int); + s390_stg (p, s390_r0, 0, STK_BASE, + (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj))); + } + + s390_stmg (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos); + local_pos += 4 * sizeof(long); + float_pos = local_pos; + s390_std (p, s390_f0, 0, STK_BASE, local_pos); + local_pos += sizeof(double); + s390_std (p, s390_f2, 0, STK_BASE, local_pos); + local_pos += sizeof(double); + + /*----------------------------------------------------------*/ + /* prepare space for valuetypes */ + /*----------------------------------------------------------*/ + vt_cur = local_pos; + vtbuf = alloca (sizeof(int)*sig->param_count); + cpos = 0; + for (i = 0; i < sig->param_count; i++) { + MonoType *type = sig->params [i]; + vtbuf [i] = -1; + DEBUG(printf("par: %d type: %d ref: %d\n",i,type->type,type->byref)); + if (type->type == MONO_TYPE_VALUETYPE) { + MonoClass *klass = type->data.klass; + gint size; + + if (klass->enumtype) + continue; + size = mono_class_native_size (klass, &align); + cpos += align - 1; + cpos &= ~(align - 1); + vtbuf [i] = cpos; + cpos += size; + } + } + cpos += 3; + cpos &= ~3; + + local_pos += cpos; + + /*----------------------------------------------------------*/ + /* set MonoInvocation::stack_args */ + /*----------------------------------------------------------*/ + stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); + s390_la (p, s390_r0, 0, STK_BASE, stackval_arg_pos); + s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args))); + + /*----------------------------------------------------------*/ + /* add stackval arguments */ + /*----------------------------------------------------------*/ + for (i = 0; i < sig->param_count; ++i) { + if (sig->params [i]->byref) { + ADD_ISTACK_PARM(0, 1); + } else { + simple_type = sig->params [i]->type; + enum_savechk: + switch (simple_type) { + case MONO_TYPE_I8: + ADD_ISTACK_PARM(-1, 2); + break; + case MONO_TYPE_R4: + ADD_RSTACK_PARM(1); + break; + case MONO_TYPE_R8: + ADD_RSTACK_PARM(2); + break; + case MONO_TYPE_VALUETYPE: + if (sig->params [i]->data.klass->enumtype) { + simple_type = sig->params [i]->data.klass->enum_basetype->type; + goto enum_savechk; + } + if (sig->pinvoke) + parSize = mono_class_native_size (sig->params [i]->data.klass, &align); + else + parSize = mono_class_value_size (sig->params [i]->data.klass, &align); + switch(parSize) { + case 0: + case 1: + case 2: + case 4: + ADD_PSTACK_PARM(0, 1); + break; + case 8: + ADD_PSTACK_PARM(-1, 2); + break; + default: + ADD_TSTACK_PARM; + } + break; + default: + ADD_ISTACK_PARM(0, 1); + } + } + + if (vtbuf [i] >= 0) { + s390_la (p, s390_r3, 0, STK_BASE, vt_cur); + s390_stg (p, s390_r3, 0, STK_BASE, stackval_arg_pos); + s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); + vt_cur += vtbuf 
[i]; + } else { + s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); + } + + /*--------------------------------------*/ + /* Load the parameter registers for the */ + /* call to stackval_from_data */ + /*--------------------------------------*/ + s390_bras (p, s390_r13, 8); + s390_llong(p, sig->params [i]); + s390_llong(p, sig->pinvoke); + s390_llong(p, stackval_from_data); + s390_lg (p, s390_r2, 0, s390_r13, 0); + s390_lg (p, s390_r5, 0, s390_r13, 4); + s390_lg (p, s390_r1, 0, s390_r13, 8); + s390_basr (p, s390_r14, s390_r1); + + stackval_arg_pos += sizeof(stackval); + + /* fixme: alignment */ + DEBUG (printf ("arg_pos %d --> ", arg_pos)); + if (sig->pinvoke) + arg_pos += mono_type_native_stack_size (sig->params [i], &align); + else + arg_pos += mono_type_stack_size (sig->params [i], &align); + + DEBUG (printf ("%d\n", stackval_arg_pos)); + } + + /*----------------------------------------------------------*/ + /* Set return area pointer. */ + /*----------------------------------------------------------*/ + s390_la (p, s390_r10, 0, STK_BASE, stackval_arg_pos); + s390_stg(p, s390_r10, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); + if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { + MonoClass *klass = sig->ret->data.klass; + if (!klass->enumtype) { + s390_la (p, s390_r9, 0, s390_r10, sizeof(stackval)); + s390_st (p, s390_r9, 0,STK_BASE, stackval_arg_pos); + stackval_arg_pos += sizeof(stackval); + } + } + + /*----------------------------------------------------------*/ + /* call ves_exec_method */ + /*----------------------------------------------------------*/ + s390_bras (p, s390_r13, 4); + s390_llong(p, ves_exec_method); + s390_lg (p, s390_r1, 0, s390_r13, 0); + s390_la (p, s390_r2, 0, STK_BASE, MINV_POS); + s390_basr (p, s390_r14, s390_r1); + + /*----------------------------------------------------------*/ + /* move retval from stackval to proper place (r3/r4/...) 
*/ + /*----------------------------------------------------------*/ + DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref)); + if (sig->ret->byref) { + DEBUG (printf ("ret by ref\n")); + s390_stg(p, s390_r2, 0, s390_r10, 0); + } else { + enum_retvalue: + switch (sig->ret->type) { + case MONO_TYPE_VOID: + break; + case MONO_TYPE_BOOLEAN: + case MONO_TYPE_U1: + s390_lghi(p, s390_r2, 0); + s390_ic (p, s390_r2, 0, s390_r10, 0); + break; + case MONO_TYPE_I2: + case MONO_TYPE_U2: + s390_lh (p, s390_r2, 0,s390_r10, 0); + break; + case MONO_TYPE_I4: + case MONO_TYPE_U4: + case MONO_TYPE_I: + case MONO_TYPE_U: + s390_lgf(p, s390_r2, 0, s390_r10, 0); + break; + case MONO_TYPE_OBJECT: + case MONO_TYPE_STRING: + case MONO_TYPE_CLASS: + case MONO_TYPE_I8: + s390_lg (p, s390_r2, 0, s390_r10, 0); + break; + case MONO_TYPE_R4: + s390_le (p, s390_f0, 0, s390_r10, 0); + break; + case MONO_TYPE_R8: + s390_ld (p, s390_f0, 0, s390_r10, 0); + break; + case MONO_TYPE_VALUETYPE: + if (sig->ret->data.klass->enumtype) { + simpletype = sig->ret->data.klass->enum_basetype->type; + goto enum_retvalue; + } + /*---------------------------------*/ + /* Call stackval_to_data to return */ + /* the structure */ + /*---------------------------------*/ + s390_bras (p, s390_r13, 8); + s390_llong(p, sig->ret); + s390_llong(p, sig->pinvoke); + s390_llong(p, stackval_to_data); + s390_lg (p, s390_r2, 0, s390_r13, 0); + s390_lg (p, s390_r3, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); + if (sz.retStruct) { + /*------------------------------------------*/ + /* Get stackval_to_data to set result area */ + /*------------------------------------------*/ + s390_lgr (p, s390_r4, s390_r8); + } else { + /*------------------------------------------*/ + /* Give stackval_to_data a temp result area */ + /*------------------------------------------*/ + s390_la (p, s390_r4, 0, STK_BASE, stackval_arg_pos); + } + s390_lg (p, s390_r5, 0,s390_r13, 4); + s390_lg (p, s390_r1, 0, s390_r13, 8); + s390_basr (p, s390_r14, s390_r1); + switch (retSize) { + case 0: + break; + case 1: + s390_lghi(p, s390_r2, 0); + s390_ic (p, s390_r2, 0, s390_r10, 0); + break; + case 2: + s390_lh (p, s390_r2, 0, s390_r10, 0); + break; + case 4: + s390_lgf(p, s390_r2, 0, s390_r10, 0); + break; + case 8: + s390_lg (p, s390_r2, 0, s390_r10, 0); + break; + default: + /*-------------------------------------------------*/ + /* stackval_to_data has placed data in result area */ + /*-------------------------------------------------*/ + } + break; + default: + g_error ("Type 0x%x not handled yet in thunk creation", + sig->ret->type); + break; + } + } + + /*----------------------------------------------------------*/ + /* epilog */ + /*----------------------------------------------------------*/ + s390_lg (p, STK_BASE, 0, STK_BASE, 0); + s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET); + s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET); + s390_br (p, s390_r4); + + DEBUG (printf ("emited code size: %d\n", p - code_buffer)); + + DEBUG (printf ("Delegate [end emiting]\n")); + + ji = g_new0 (MonoJitInfo, 1); + ji->method = method; + ji->code_size = p - code_buffer; + ji->code_start = code_buffer; + + mono_jit_info_table_add (mono_get_root_domain (), ji); + + return ji->code_start; +} + +/*========================= End of Function ========================*/ -- cgit v1.1 From 17467e9a25e9a1cf71c170fd85e042a5a11a0f05 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Wed, 4 Aug 2004 20:43:11 +0000 Subject: Further 64-bit S/390 
updates svn path=/trunk/mono/; revision=31898 --- s390x/s390x-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 797d751..7b41212 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -621,12 +621,16 @@ typedef struct { #define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) #define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) #define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) +#define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2) #define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) #define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2) #define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2) #define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) #define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) #define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) +#define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d) +#define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d) +#define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d) #define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d) #define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d) #define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) -- cgit v1.1 From ee8712fd77bdd445d98c511a07f29b5136368201 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Thu, 5 Aug 2004 23:28:29 +0000 Subject: Add s390x svn path=/trunk/mono/; revision=31966 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index 876b2fe..9eb9ee8 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 alpha hppa amd64 +DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -- cgit v1.1 From ee4209b85e88e6adfc07a057b41747607235805c Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 6 Aug 2004 16:28:23 +0000 Subject: Support the MEMCPY(base, base) rule and add initial ARGLIST support svn path=/trunk/mono/; revision=31985 --- s390/s390-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index f785ff0..c1cabb5 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -147,7 +147,7 @@ typedef enum { #define S390_PARM_SAVE_OFFSET 8 #define S390_REG_SAVE_OFFSET 24 #define S390_RET_ADDR_OFFSET 56 -#define S390_FLOAT_ADDR_OFFSET 64 +#define S390_FLOAT_SAVE_OFFSET 64 #define S390_CC_ZR 8 #define S390_CC_NE 7 -- cgit v1.1 From 7f2d7df98341055eaf370855c499508599770dec Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Sat, 14 Aug 2004 18:28:26 +0000 Subject: hush cvs svn path=/trunk/mono/; revision=32344 --- s390x/.cvsignore | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 s390x/.cvsignore diff --git a/s390x/.cvsignore b/s390x/.cvsignore new file mode 100644 index 0000000..e9793ab --- /dev/null +++ b/s390x/.cvsignore @@ -0,0 +1,6 @@ +Makefile +Makefile.in +.libs +.deps +*.la +*.lo -- cgit v1.1 From c6a18db1cda9d62eaba7e1095f34eb84e7c39a8b Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 16 Aug 2004 12:58:06 +0000 Subject: 2004-08-16 Zoltan Varga * x86/x86-codegen.h: Add macros for accessing the mod/rm byte. 
svn path=/trunk/mono/; revision=32365 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog b/ChangeLog index e913f7f..c722780 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-08-16 Zoltan Varga + + * x86/x86-codegen.h: Add macros for accessing the mod/rm byte. + 2004-07-30 Zoltan Varga * amd64/amd64-codegen.h: Ongoing JIT work. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index bc3fd07..6da9ec5 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -242,6 +242,10 @@ typedef union { /* * useful building blocks */ +#define x86_modrm_mod(modrm) ((modrm) >> 6) +#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7) +#define x86_modrm_rm(modrm) ((modrm) & 0x7) + #define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0) #define x86_imm_emit32(inst,imm) \ do { \ -- cgit v1.1 From 39a59671ff853ab672d9db1c982093ee1c7cc1f8 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 21 Aug 2004 20:07:37 +0000 Subject: 2004-08-21 Zoltan Varga * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG since under amd64, all 16 registers have a low part. svn path=/trunk/mono/; revision=32632 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 2 ++ 2 files changed, 7 insertions(+) diff --git a/ChangeLog b/ChangeLog index c722780..42b7e07 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2004-08-21 Zoltan Varga + + * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG + since under amd64, all 16 registers have a low part. + 2004-08-16 Zoltan Varga * x86/x86-codegen.h: Add macros for accessing the mod/rm byte. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index f34fcee..0310995 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -91,6 +91,8 @@ typedef union { #include "../x86/x86-codegen.h" +#undef X86_IS_BYTE_REG +#define X86_IS_BYTE_REG(reg) 1 /* Need to fill this info in for amd64. */ -- cgit v1.1 From 8ca359bb4894521802e1f2044ec55a9aada4c08e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 29 Aug 2004 09:41:22 +0000 Subject: 2004-08-29 Zoltan Varga * amd64/amd64-codegen.h: Add SSE2 instructions. svn path=/trunk/mono/; revision=32991 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 62 +++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 42b7e07..42737c4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-08-29 Zoltan Varga + + * amd64/amd64-codegen.h: Add SSE2 instructions. + 2004-08-21 Zoltan Varga * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 0310995..73f7068 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -74,7 +74,7 @@ typedef enum #define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg))) #define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) -#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \ +#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ { \ unsigned char _amd64_rex_bits = \ (((width) > 4) ? AMD64_REX_W : 0) | \ @@ -82,7 +82,7 @@ typedef enum (((reg_index) > 7) ? AMD64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? 
AMD64_REX_B : 0); \ if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } + } while (0) typedef union { long val; @@ -488,6 +488,64 @@ typedef union { amd64_membase_emit ((inst), 0, (basereg), (disp)); \ } while (0) +/* + * SSE + */ + +#define emit_opcode3(inst,op1,op2,op3) do { \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ +} while (0) + +#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + emit_opcode3 ((inst), (op1), (op2), (op3)); \ + x86_reg_emit ((inst), (dreg), (reg)); \ +} while (0) + +#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) + +#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ + amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ + emit_opcode3 ((inst), (op1), (op2), (op3)); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ +} while (0) + +#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ + amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ + emit_opcode3 ((inst), (op1), (op2), (op3)); \ + amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ +} while (0) + +#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) + +#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) + +#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) + +#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) + +#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) + +#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) + +#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10) + +#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) + +#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 0) + +#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, 8) + +#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) + +#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) + +#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) + +#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) + /* Generated from x86-codegen.h */ #define amd64_breakpoint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_breakpoint(inst); } while (0) -- cgit v1.1 From b0791969d5ddbcb465d86bcd42c86150f653a9a1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 29 Aug 2004 11:11:38 +0000 Subject: 2004-08-29 Zoltan Varga * amd64/amd64-codegen.h: More SSE work. 
svn path=/trunk/mono/; revision=32992 --- ChangeLog | 2 ++ amd64/amd64-codegen.h | 16 +++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 42737c4..64a12a4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2004-08-29 Zoltan Varga + * amd64/amd64-codegen.h: More SSE work. + * amd64/amd64-codegen.h: Add SSE2 instructions. 2004-08-21 Zoltan Varga diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 73f7068..95e0beb 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -499,22 +499,28 @@ typedef union { } while (0) #define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ + *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - emit_opcode3 ((inst), (op1), (op2), (op3)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) #define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) #define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ + *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - emit_opcode3 ((inst), (op1), (op2), (op3)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ + *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ - emit_opcode3 ((inst), (op1), (op2), (op3)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ } while (0) @@ -538,6 +544,10 @@ typedef union { #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, 8) +#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) + +#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) + #define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) #define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) -- cgit v1.1 From e11c33f0ae258eb62dd5fc2e4c6ce12952d25233 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 29 Aug 2004 21:04:04 +0000 Subject: 2004-08-30 Zoltan Varga * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX generation. svn path=/trunk/mono/; revision=33003 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 64a12a4..aaf42f4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2004-08-30 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX + generation. + 2004-08-29 Zoltan Varga * amd64/amd64-codegen.h: More SSE work. 
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 95e0beb..489795b 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -617,7 +617,7 @@ typedef union { #define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); } while (0) #define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) #define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); } while (0) -#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) +#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) #define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); } while (0) #define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); } while (0) #define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); } while (0) -- cgit v1.1 From 3a8f0a20bd939db788d3fd871b4c0ca37a4d0f96 Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Wed, 1 Sep 2004 01:04:04 +0000 Subject: Support short forms of push imm svn path=/trunk/mono/; revision=33128 --- x86/x86-codegen.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6da9ec5..15c6fbf 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1341,10 +1341,18 @@ typedef union { x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \ } while (0) +#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0) + #define x86_push_imm(inst,imm) \ do { \ - *(inst)++ = (unsigned char)0x68; \ - x86_imm_emit32 ((inst), (imm)); \ + int _imm = (int) imm; \ + if (x86_is_imm8 (_imm)) { \ + *(inst)++ = (unsigned char)0x6A; \ + x86_imm_emit8 ((inst), (_imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x68; \ + x86_imm_emit32 ((inst), (_imm)); \ + } \ } while (0) #define x86_pop_reg(inst,reg) \ -- cgit v1.1 From 4c5436f259d4a109ab352f2ec7b7891cdce76cc9 Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Mon, 6 Sep 2004 15:07:37 +0000 Subject: fix warning svn path=/trunk/mono/; revision=33415 --- x86/x86-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 15c6fbf..b372bef 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1345,7 +1345,7 @@ typedef union { #define x86_push_imm(inst,imm) \ do { \ - int _imm = (int) imm; \ + int _imm = (int) (imm); \ if (x86_is_imm8 (_imm)) { \ *(inst)++ = (unsigned char)0x6A; \ x86_imm_emit8 ((inst), (_imm)); \ -- cgit v1.1 From b982bf7e3e3e98afa37544b4a197d406f00b5e5a Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Mon, 8 Nov 2004 03:19:16 +0000 Subject: fix svn path=/trunk/mono/; revision=35803 --- amd64/tramp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/amd64/tramp.c b/amd64/tramp.c index 17183c3..5a4f9a9 100644 --- a/amd64/tramp.c +++ b/amd64/tramp.c @@ -688,14 +688,14 @@ mono_arch_create_method_pointer (MonoMethod *method) * If it is a static P/Invoke method, we can just return the pointer * to the method implementation. */ - if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && method->addr) { + if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && ((MonoMethodPInvoke*) method)->addr) { ji = g_new0 (MonoJitInfo, 1); ji->method = method; ji->code_size = 1; - ji->code_start = method->addr; + ji->code_start = ((MonoMethodPInvoke*) method)->addr; mono_jit_info_table_add (mono_get_root_domain (), ji); - return method->addr; + return ((MonoMethodPInvoke*) method)->addr; } sig = method->signature; -- cgit v1.1 From 149905478e1af4189a0cd9cf3f0e294dbb2bccbc Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 15 Nov 2004 19:00:05 +0000 Subject: 2004-11-15 Zoltan Varga * amd64/x86-64-codegen.h: Get rid of this. svn path=/trunk/mono/; revision=36145 --- ChangeLog | 4 + amd64/x86-64-codegen.h | 409 ------------------------------------------------- 2 files changed, 4 insertions(+), 409 deletions(-) delete mode 100644 amd64/x86-64-codegen.h diff --git a/ChangeLog b/ChangeLog index aaf42f4..02daa51 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-11-15 Zoltan Varga + + * amd64/x86-64-codegen.h: Get rid of this. + 2004-08-30 Zoltan Varga * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX diff --git a/amd64/x86-64-codegen.h b/amd64/x86-64-codegen.h deleted file mode 100644 index 68bcfec..0000000 --- a/amd64/x86-64-codegen.h +++ /dev/null @@ -1,409 +0,0 @@ -/* - * amd64-codegen.h: Macros for generating x86 code - * - * Authors: - * Paolo Molaro (lupus@ximian.com) - * Intel Corporation (ORP Project) - * Sergey Chaban (serge@wildwestsoftware.com) - * Dietmar Maurer (dietmar@ximian.com) - * Patrik Torstensson - * Zalman Stern - * - * Not all routines are done for AMD64. Much could also be removed from here if supporting tramp.c is the only goal. - * - * Copyright (C) 2000 Intel Corporation. All rights reserved. - * Copyright (C) 2001, 2002 Ximian, Inc. - */ - -#ifndef AMD64_H -#define AMD64_H - -typedef enum { - AMD64_RAX = 0, - AMD64_RCX = 1, - AMD64_RDX = 2, - AMD64_RBX = 3, - AMD64_RSP = 4, - AMD64_RBP = 5, - AMD64_RSI = 6, - AMD64_RDI = 7, - AMD64_R8 = 8, - AMD64_R9 = 9, - AMD64_R10 = 10, - AMD64_R11 = 11, - AMD64_R12 = 12, - AMD64_R13 = 13, - AMD64R_14 = 14, - AMD64_R15 = 15, - AMD64_NREG -} AMD64_Reg_No; - -typedef enum { - AMD64_XMM0 = 0, - AMD64_XMM1 = 1, - AMD64_XMM2 = 2, - AMD64_XMM3 = 3, - AMD64_XMM4 = 4, - AMD64_XMM5 = 5, - AMD64_XMM6 = 6, - AMD64_XMM8 = 8, - AMD64_XMM9 = 9, - AMD64_XMM10 = 10, - AMD64_XMM11 = 11, - AMD64_XMM12 = 12, - AMD64_XMM13 = 13, - AMD64_XMM14 = 14, - AMD64_XMM15 = 15, - AMD64_XMM_NREG = 16, -} AMD64_XMM_Reg_No; - -typedef enum -{ - AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ - AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ - AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ - AMD64_REX_W = 8 /* Opeartion is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ -} AMD64_REX_Bits; - -#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits))) -#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) \ - { \ - unsigned char _amd64_rex_bits = \ - (((width) > 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? 
AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } - -typedef union { - long val; - unsigned char b [8]; -} amd64_imm_buf; - -#include "../x86/x86-codegen.h" - - -/* Need to fill this info in for amd64. */ - -#if 0 -/* -// bitvector mask for callee-saved registers -*/ -#define X86_ESI_MASK (1< Date: Wed, 17 Nov 2004 03:05:28 +0000 Subject: Add support for siginfo_t as a parameter to mono_arch_is_int_overflow. Support this routine in s390. svn path=/trunk/mono/; revision=36188 --- s390/ChangeLog | 5 ++++- s390/s390-codegen.h | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/s390/ChangeLog b/s390/ChangeLog index 6d033e7..1f01e89 100644 --- a/s390/ChangeLog +++ b/s390/ChangeLog @@ -1,4 +1,7 @@ +2004-11-15 Neale Ferguson + + * s390-codegen.h: Minor macro modifications + 2004-07-30 Neale Ferguson * s390-codegen.h: reworked macros for code generation. - diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index c1cabb5..1742d20 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -137,9 +137,9 @@ typedef enum { #define s390_is_imm16(val) ((gint)val >= (gint)-(1<<15) && \ (gint)val <= (gint)((1<<15)-1)) -#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) +#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 32767) #define s390_is_imm12(val) ((gint)val >= (gint)-(1<<11) && \ - (gint)val <= (gint)((1<<15)-1)) + (gint)val <= (gint)((1<<11)-1)) #define s390_is_uimm12(val) ((gint)val >= 0 && (gint)val <= 4095) #define STK_BASE s390_r15 @@ -573,6 +573,8 @@ typedef struct { #define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) #define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) #define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) +#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d) +#define s390_slb(c, r, x, b, d) S390_RXY(c, 0xe399, r, x, b, d) #define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) #define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) #define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) -- cgit v1.1 From 3e56873e56ee01f0195683a20bd44e0fd03db4ee Mon Sep 17 00:00:00 2001 From: Patrik Torstensson Date: Thu, 18 Nov 2004 18:44:57 +0000 Subject: 2004-11-16 Patrik Torstensson * x86/x86-codegen.h: added opcodes for xadd instructions svn path=/trunk/mono/; revision=36283 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/ChangeLog b/ChangeLog index 02daa51..168f859 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2004-11-16 Patrik Torstensson + + * x86/x86-codegen.h: added opcodes for xadd instructions + 2004-11-15 Zoltan Varga * amd64/x86-64-codegen.h: Get rid of this. 
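XADD exchanges the source register with the destination operand and leaves the sum in the destination, so combined with a LOCK prefix it is the standard building block for interlocked increment and add. The macros below follow the usual two-byte pattern: the 0x0F escape, then opcode 0xC0 for the 8-bit form or 0xC1 for the wider forms, then the ModRM and addressing bytes. A hand-assembled sketch of what an x86_xadd_membase_reg call boils down to; the byte values are assumed from the standard IA-32 encoding, and the LOCK prefix is the caller's job, not the macro's:

unsigned char code [8], *p = code;

*p++ = 0xf0;	/* LOCK: make the read-modify-write atomic */
*p++ = 0x0f;	/* two-byte opcode escape */
*p++ = 0xc1;	/* XADD r/m32, r32 (0xc0 is the 8-bit form) */
*p++ = 0x45;	/* ModRM: mod=01, reg=EAX (000), r/m=EBP (101) */
*p++ = 0x08;	/* disp8 = 8, giving: lock xadd [ebp+8], eax */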
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index b372bef..8cf3e80 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -423,6 +423,36 @@ typedef union { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define x86_xadd_reg_reg(inst,dreg,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_xadd_mem_reg(inst,mem,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + #define x86_inc_mem(inst,mem) \ do { \ *(inst)++ = (unsigned char)0xff; \ -- cgit v1.1 From da4b0970bffc8f281679bddf7371679910d0a23c Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 19 Nov 2004 15:04:41 +0000 Subject: Fri Nov 19 17:29:22 CET 2004 Paolo Molaro * ppc/ppc-codegen.h: counter reg decrement branch values (patch by Geoff Norton ). svn path=/trunk/mono/; revision=36320 --- ChangeLog | 6 ++++++ ppc/ppc-codegen.h | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/ChangeLog b/ChangeLog index 168f859..83f1dd2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ + +Fri Nov 19 17:29:22 CET 2004 Paolo Molaro + + * ppc/ppc-codegen.h: counter reg decrement branch values + (patch by Geoff Norton ). + 2004-11-16 Patrik Torstensson * x86/x86-codegen.h: added opcodes for xadd instructions diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 27c8690..aa85d45 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -88,9 +88,15 @@ typedef enum { enum { /* B0 operand for branches */ + PPC_BR_DEC_CTR_NONZERO_FALSE = 0, PPC_BR_LIKELY = 1, /* can be or'ed with the conditional variants */ + PPC_BR_DEC_CTR_ZERO_FALSE = 2, PPC_BR_FALSE = 4, + PPC_BR_DEC_CTR_NONZERO_TRUE = 8, + PPC_BR_DEC_CTR_ZERO_TRUE = 10, PPC_BR_TRUE = 12, + PPC_BR_DEC_CTR_NONZERO = 16, + PPC_BR_DEC_CTR_ZERO = 18, PPC_BR_ALWAYS = 20, /* B1 operand for branches */ PPC_BR_LT = 0, -- cgit v1.1 From c523c66bf11c9c05df3d77d42f8be9821ad558e5 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 25 Nov 2004 13:32:53 +0000 Subject: 2004-11-25 Zoltan Varga * amd64/amd64-codegen.h: Updates to support the PIC changes. svn path=/trunk/mono/; revision=36549 --- ChangeLog | 3 +++ amd64/amd64-codegen.h | 23 ++++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/ChangeLog b/ChangeLog index 83f1dd2..997b5e8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2004-11-25 Zoltan Varga + + * amd64/amd64-codegen.h: Updates to support the PIC changes. 
Fri Nov 19 17:29:22 CET 2004 Paolo Molaro diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 489795b..b2b9de6 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -177,6 +177,15 @@ typedef union { *(inst)++ = imb.b [7]; \ } while (0) +#define amd64_membase_emit(inst,reg,basereg,disp) do { \ + if ((basereg) == AMD64_RIP) { \ + x86_address_byte ((inst), 0, (reg)&0x7, 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + else \ + x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ +} while (0) + #define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ do { \ if ((reg) == X86_EAX) { \ @@ -274,7 +283,7 @@ typedef union { case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ @@ -346,15 +355,6 @@ typedef union { } \ } while (0) -#define amd64_membase_emit(inst,reg,basereg,disp) do { \ - if ((basereg) == AMD64_RIP) { \ - x86_address_byte ((inst), 0, (reg)&0x7, 5); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - else \ - x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ -} while (0) - #define amd64_lea_membase(inst,reg,basereg,disp) \ do { \ amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ @@ -487,6 +487,8 @@ typedef union { *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ amd64_membase_emit ((inst), 0, (basereg), (disp)); \ } while (0) + +#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) /* * SSE @@ -717,7 +719,6 @@ typedef union { #define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0) -#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_call_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_call_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_code((inst),(target)); } while (0) //#define amd64_ret_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); } while (0) #define amd64_ret_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); } while (0) -- cgit v1.1 From c7b8d172d479d75da8d183f9491e4651bbc5b4f7 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 7 Dec 2004 04:18:03 +0000 Subject: Fix atomic operations and add initial support for tls support. 
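The large s390 reshuffle in this patch is mostly an alphabetical reordering plus a handful of new mnemonics, such as ear (extract access register, presumably the TLS hook the log entry mentions). It is easier to scan with the two workhorse instruction formats in mind: RR packs an opcode and two register numbers into two bytes, while RX adds an index register, a base register, and a 12-bit displacement in four bytes. A rough sketch of the packings, inferred from the ESA/390 format definitions and the macro arguments rather than copied from this header:

/* RR: opcode(8) | r1(4) | r2(4), e.g. s390_lr is S390_RR with op 0x18. */
static unsigned short
s390_pack_rr (unsigned char op, int r1, int r2)
{
	return (unsigned short) ((op << 8) | ((r1 & 0xf) << 4) | (r2 & 0xf));
}

/* RX: opcode(8) | r1(4) | x2(4) | b2(4) | d2(12),
 * e.g. s390_l is S390_RX with op 0x58. */
static unsigned int
s390_pack_rx (unsigned char op, int r1, int x2, int b2, int d2)
{
	return ((unsigned int) op << 24) | ((r1 & 0xf) << 20) |
	       ((x2 & 0xf) << 16) | ((b2 & 0xf) << 12) | (d2 & 0xfff);
}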
svn path=/trunk/mono/; revision=37284 --- s390/s390-codegen.h | 201 ++++++++++++++++++----------------- s390x/s390x-codegen.h | 289 +++++++++++++++++++++++++------------------------- 2 files changed, 246 insertions(+), 244 deletions(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index 1742d20..e219407 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -564,125 +564,126 @@ typedef struct { s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ } while (0) -#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) -#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) -#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) +#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) +#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) +#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) +#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) #define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) +#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d) #define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) -#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) -#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) -#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) #define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) -#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d) -#define s390_slb(c, r, x, b, d) S390_RXY(c, 0xe399, r, x, b, d) -#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) -#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) -#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) -#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) -#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) -#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) -#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) -#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) -#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) -#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) -#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) -#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) +#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) +#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) +#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) +#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) +#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) +#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) #define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) -#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) -#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) -#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) -#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) -#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) -#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) -#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) -#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) -#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) -#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) -#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) -#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) -#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) -#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) -#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) -#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) -#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) -#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) -#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, 
b, d) -#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) -#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) -#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) -#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) -#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) -#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) -#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) -#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) -#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) -#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) -#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) -#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) -#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) -#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) #define s390_break(c) S390_RR(c, 0, 0, 0) -#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) -#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) -#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) -#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) #define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) -#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) +#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) +#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) +#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) #define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) -#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) +#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) +#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) +#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) +#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) +#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) +#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) +#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) +#define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2) +#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) +#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) +#define s390_jc(c, m, d) s390_brc(c, m, d) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) +#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) #define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) -#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) -#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) -#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) #define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) #define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) +#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) #define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) -#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) #define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) -#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) -#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) -#define s390_jc(c, m, d) s390_brc(c, m, d) -#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) +#define s390_jnz(c, d) 
s390_brc(c, S390_CC_NZ, d) +#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) +#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) +#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) +#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) +#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) +#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) +#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) +#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) +#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) +#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) +#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) +#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) +#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) +#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) +#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) +#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) +#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) +#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) +#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) +#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) +#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) +#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) +#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) +#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) +#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) +#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) +#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) +#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) +#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) +#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) +#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) +#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) +#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) +#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) +#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) +#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) +#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) +#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) +#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) +#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) +#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) +#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) +#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) +#define s390_slb(c, r, x, b, d) S390_RXY(c, 0xe399, r, x, b, d) +#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) +#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) #define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) #define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) -#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) -#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) -#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) #define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) -#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) -#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) +#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) #define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) #define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, 
r1, r2) -#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) -#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) -#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) -#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) -#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) -#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) -#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) -#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) -#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) -#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) -#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) -#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) -#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) -#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) -#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) -#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) -#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) -#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) -#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) -#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) -#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) -#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) -#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) -#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) -#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) +#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) +#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) +#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) +#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) +#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) +#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) +#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) +#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) +#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) +#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) +#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) +#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) +#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) #define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) #define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) -#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) +#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) +#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) #endif diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 7b41212..456f840 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -567,175 +567,176 @@ typedef struct { s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ } while (0) -#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) -#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) -#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) -#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) -#define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v) -#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) -#define s390_alcgr(c, r1, r2) S390_RRE(c, 0xb988, r1, r2) -#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) -#define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2) -#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) -#define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2) #define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, 
b, d) +#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) +#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) +#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) #define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d) +#define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v) +#define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2) +#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) +#define s390_alcgr(c, r1, r2) S390_RRE(c, 0xb988, r1, r2) +#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) #define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) #define s390_alg(c, r, x, b, d) S390_RXY(c, 0xe30a, r, x, b, d) -#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) -#define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2) -#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) -#define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2) -#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) -#define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2) -#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) -#define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d) -#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) -#define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d) -#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) -#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) -#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) -#define s390_msgr(c, r1, r2) S390_RRE(c, 0xb90c, r1, r2) -#define s390_msgfr(c, r1, r2) S390_RRE(c, 0xb91c, r1, r2) -#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) -#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) -#define s390_mlgr(c, r1, r2) S390_RRE(c, 0xb986, r1, r2) -#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) -#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) -#define s390_dlgr(c, r1, r2) S390_RRE(c, 0xb987, r1, r2) -#define s390_dsgr(c, r1, r2) S390_RRE(c, 0xb90d, r1, r2) -#define s390_dsgfr(c, r1, r2) S390_RRE(c, 0xb91d, r1, r2) +#define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2) +#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) +#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) +#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) +#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) +#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) +#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) #define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) -#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) -#define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2) -#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) -#define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d) -#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) -#define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2) -#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) -#define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d) -#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) -#define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2) -#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) -#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d) -#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) -#define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) -#define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) -#define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2) -#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) -#define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2) -#define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2) -#define s390_l(c, r, x, b, d) 
S390_RX(c, 0x58, r, x, b, d) -#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) -#define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) -#define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d) -#define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d) -#define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d) -#define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d) -#define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d) -#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) -#define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2) -#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) -#define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2) -#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) -#define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2) -#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) -#define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d) -#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) -#define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) -#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) -#define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v) -#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) -#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) -#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) -#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d) -#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) -#define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d) -#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) -#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) -#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) -#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) -#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) -#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) -#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) -#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) -#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) -#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) -#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) -#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) -#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) -#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) #define s390_break(c) S390_RR(c, 0, 0, 0) -#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) -#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) -#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) -#define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2) -#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) -#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2) #define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) +#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) +#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) +#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) #define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d) +#define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i) +#define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2) +#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) #define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) #define s390_clg(c, r, x, b, d) S390_RXY(c, 0xe321, r, x, b, d) 
-#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) -#define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i) -#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) +#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2) +#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) +#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) +#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) +#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) +#define s390_dlgr(c, r1, r2) S390_RRE(c, 0xb987, r1, r2) +#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) +#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) +#define s390_dsgfr(c, r1, r2) S390_RRE(c, 0xb91d, r1, r2) +#define s390_dsgr(c, r1, r2) S390_RRE(c, 0xb90d, r1, r2) +#define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2) +#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) +#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) +#define s390_jc(c, m, d) s390_brc(c, m, d) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) +#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) #define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) -#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) -#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) -#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) #define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) #define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) +#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) #define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) -#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) #define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) -#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) -#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) -#define s390_jc(c, m, d) s390_brc(c, m, d) -#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) -#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) -#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) -#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) -#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) +#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) +#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) +#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) +#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) +#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) +#define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d) +#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) +#define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2) +#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) +#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) +#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) +#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) +#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) +#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) +#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) +#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) +#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) +#define s390_lgb(c, r, x, b, d) S390_RXY(c, 
0xe377, r, x, b, d) +#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) +#define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) +#define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) +#define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v) +#define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) +#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) +#define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) +#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) +#define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d) +#define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d) +#define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2) +#define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d) +#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) +#define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d) +#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) +#define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2) +#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) +#define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2) +#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) +#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) +#define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2) +#define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2) +#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) +#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) +#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) +#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) +#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) +#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) +#define s390_mlgr(c, r1, r2) S390_RRE(c, 0xb986, r1, r2) +#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) +#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) +#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) +#define s390_msgfr(c, r1, r2) S390_RRE(c, 0xb91c, r1, r2) +#define s390_msgr(c, r1, r2) S390_RRE(c, 0xb90c, r1, r2) +#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) +#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) +#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) +#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) +#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) +#define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d) +#define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2) +#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) +#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) +#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) +#define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d) +#define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2) +#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) +#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) +#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) +#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) +#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) +#define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d) +#define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2) #define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) #define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d) +#define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2) +#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) +#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) +#define s390_slda(c, r, b, d) 
S390_RS_3(c, 0x8f, r, b, d) +#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) +#define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d) +#define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2) #define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) #define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d) +#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) +#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) +#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2) #define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) #define s390_srag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0a, r1, r2, b, d) +#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) +#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) +#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) #define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) #define s390_srlg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0c, r1, r2, b, d) -#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) -#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2) -#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) -#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) -#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) -#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) -#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) -#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) -#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) -#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) -#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) -#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) -#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) -#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) -#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) -#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) -#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) -#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) -#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) -#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) -#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) -#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) -#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) -#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) -#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) -#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) -#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) +#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) +#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) +#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) +#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) +#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) +#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) +#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) +#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d) +#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) +#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) +#define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d) #define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) #define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) -#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) +#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) +#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d) 
+#define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2) +#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) #endif -- cgit v1.1 From 9f3d964963eac63f42db702fe80cbfa89e3a73b4 Mon Sep 17 00:00:00 2001 From: Raja R Harinath Date: Mon, 13 Dec 2004 06:05:53 +0000 Subject: remove svn:executable from *.cs *.c *.h svn path=/trunk/mono/; revision=37682 --- arm/arm-wmmx.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 arm/arm-wmmx.h diff --git a/arm/arm-wmmx.h b/arm/arm-wmmx.h old mode 100755 new mode 100644 -- cgit v1.1 From 0c1ce771e696eabde58e35deb64c0b578be7a92d Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Mon, 10 Jan 2005 21:13:14 +0000 Subject: - Fix atomic ops on s390 - Implement OP_ATOMIC_xxx operations on s390 - Standardize exception handling on s390 with other platforms - Enable out of line bblock support - Check vtable slot belongs to domain when building trampoline svn path=/trunk/mono/; revision=38647 --- s390/ChangeLog | 4 ++++ s390/s390-codegen.h | 2 ++ s390x/ChangeLog | 5 ++++- s390x/s390x-codegen.h | 6 ++++++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/s390/ChangeLog b/s390/ChangeLog index 1f01e89..5186c80 100644 --- a/s390/ChangeLog +++ b/s390/ChangeLog @@ -1,3 +1,7 @@ +2004-12-15 Neale Ferguson + + * s390-codegen.h: Add some new instructions (CS, CDS) + 2004-11-15 Neale Ferguson * s390-codegen.h: Minor macro modifications diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index e219407..38c7cbe 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -584,12 +584,14 @@ typedef struct { #define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) #define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) #define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d) #define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) #define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) #define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) #define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) #define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) #define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d) #define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) #define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) #define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index c65fce5..1cb7ef6 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,4 +1,7 @@ +2004-12-15 Neale Ferguson + + * s390x-codegen.h: Add some new instructions (CS, CSG, CSY, CDS, CDSG, CDSY) + 2004-08-03 Neale Ferguson * s390x-codegen.h Makefile.am tramp.c: S/390 64-bit interpreter - diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 456f840..ce9281c 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -592,6 +592,9 @@ typedef struct { #define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) #define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) #define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d) +#define s390_cdsg(c, r1, r2, b, d) S390_RSY(c, 0xeb3e, r1, r2, b, d) +#define s390_cdsy(c, r1, r2, b, d) S390_RSY(c, 0xeb31, r1, r2, b, d) #define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) #define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) #define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d) @@ -603,6 +606,9 @@ typedef struct { 
#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2) #define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) #define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d) +#define s390_csg(c, r1, r2, b, d) S390_RSY(c, 0xeb30, r1, r2, b, d) +#define s390_csy(c, r1, r2, b, d) S390_RSY(c, 0xeb14, r1, r2, b, d) #define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) #define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) #define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) -- cgit v1.1 From 2cf88a5c39f13e54cc5e5f95ab6021924077c1d8 Mon Sep 17 00:00:00 2001 From: Ben Maurer Date: Wed, 16 Feb 2005 04:43:00 +0000 Subject: remove .cvsignore, as this is not used anymore svn path=/trunk/mono/; revision=40731 --- .cvsignore | 6 ------ alpha/.cvsignore | 4 ---- amd64/.cvsignore | 3 --- arm/.cvsignore | 13 ------------- hppa/.cvsignore | 3 --- ppc/.cvsignore | 7 ------- s390/.cvsignore | 4 ---- s390x/.cvsignore | 6 ------ sparc/.cvsignore | 3 --- x86/.cvsignore | 6 ------ 10 files changed, 55 deletions(-) delete mode 100644 .cvsignore delete mode 100644 alpha/.cvsignore delete mode 100644 amd64/.cvsignore delete mode 100644 arm/.cvsignore delete mode 100644 hppa/.cvsignore delete mode 100644 ppc/.cvsignore delete mode 100644 s390/.cvsignore delete mode 100644 s390x/.cvsignore delete mode 100644 sparc/.cvsignore delete mode 100644 x86/.cvsignore diff --git a/.cvsignore b/.cvsignore deleted file mode 100644 index 0b27fc3..0000000 --- a/.cvsignore +++ /dev/null @@ -1,6 +0,0 @@ -Makefile -Makefile.in -.deps -.libs -*.la -*.lo \ No newline at end of file diff --git a/alpha/.cvsignore b/alpha/.cvsignore deleted file mode 100644 index 6358454..0000000 --- a/alpha/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -Makefile.in -Makefile -.deps -.cvsignore diff --git a/amd64/.cvsignore b/amd64/.cvsignore deleted file mode 100644 index e440faf..0000000 --- a/amd64/.cvsignore +++ /dev/null @@ -1,3 +0,0 @@ -Makefile.in -Makefile -.deps diff --git a/arm/.cvsignore b/arm/.cvsignore deleted file mode 100644 index 3a221ac..0000000 --- a/arm/.cvsignore +++ /dev/null @@ -1,13 +0,0 @@ -Makefile -Makefile.in -.deps -.libs -*.o -*.la -*.lo -*.lib -*.obj -*.exe -*.dll -arm_dpimacros.h -fixeol.sh \ No newline at end of file diff --git a/hppa/.cvsignore b/hppa/.cvsignore deleted file mode 100644 index 051d1bd..0000000 --- a/hppa/.cvsignore +++ /dev/null @@ -1,3 +0,0 @@ -Makefile -Makefile.in -.deps diff --git a/ppc/.cvsignore b/ppc/.cvsignore deleted file mode 100644 index 3c6240d..0000000 --- a/ppc/.cvsignore +++ /dev/null @@ -1,7 +0,0 @@ -Makefile -Makefile.in -.libs -.deps -*.la -*.lo -test \ No newline at end of file diff --git a/s390/.cvsignore b/s390/.cvsignore deleted file mode 100644 index 6358454..0000000 --- a/s390/.cvsignore +++ /dev/null @@ -1,4 +0,0 @@ -Makefile.in -Makefile -.deps -.cvsignore diff --git a/s390x/.cvsignore b/s390x/.cvsignore deleted file mode 100644 index e9793ab..0000000 --- a/s390x/.cvsignore +++ /dev/null @@ -1,6 +0,0 @@ -Makefile -Makefile.in -.libs -.deps -*.la -*.lo diff --git a/sparc/.cvsignore b/sparc/.cvsignore deleted file mode 100644 index 051d1bd..0000000 --- a/sparc/.cvsignore +++ /dev/null @@ -1,3 +0,0 @@ -Makefile -Makefile.in -.deps diff --git a/x86/.cvsignore b/x86/.cvsignore deleted file mode 100644 index e9793ab..0000000 --- a/x86/.cvsignore +++ /dev/null @@ -1,6 +0,0 @@ -Makefile -Makefile.in -.libs -.deps -*.la -*.lo -- cgit v1.1 From c7a5bc7b7055832a36dc63ba67ad7add33a95d06 Mon Sep 17 00:00:00 2001 From: 
Zoltan Varga Date: Sun, 20 Feb 2005 14:16:51 +0000 Subject: 2005-02-20 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex. svn path=/trunk/mono/; revision=40934 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 997b5e8..3335e26 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-02-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex. + 2004-11-25 Zoltan Varga * amd64/amd64-codegen.h: Updates to support the PIC changes. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index b2b9de6..209307c 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -707,7 +707,7 @@ typedef union { #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_jump_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_code((inst),(target)); } while (0) +#define amd64_jump_code_size(inst,target,size) do { x86_jump_code((inst),(target)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_disp((inst),(disp)); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) -- cgit v1.1 From b175669d7abc2f7e83940305cf2cb1f7663569b0 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 20 Feb 2005 18:48:25 +0000 Subject: 2005-02-20 Zoltan Varga * amd64/amd64-codegen.h: Add xadd instructions. svn path=/trunk/mono/; revision=40956 --- ChangeLog | 2 ++ amd64/amd64-codegen.h | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3335e26..b830f6e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2005-02-20 Zoltan Varga + * amd64/amd64-codegen.h: Add xadd instructions. + * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex. 
2004-11-25 Zoltan Varga diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 209307c..2a91fbc 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -742,11 +742,9 @@ typedef union { //#define amd64_padding_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_padding((inst),(size)); } while (0) #define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); } while (0) #define amd64_epilog_size(inst,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); } while (0) - - - - - +#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); } while (0) +#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); } while (0) +#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); } while (0) @@ -767,6 +765,9 @@ typedef union { #define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size) #define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size) #define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size) +#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size) +#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8) #define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8) #define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8) -- cgit v1.1 From 3c4a8677815d2ad4e0b47b809ca16b43f33e3f96 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 6 Mar 2005 21:25:22 +0000 Subject: 2005-03-06 Zoltan Varga * amd64/amd64-codegen.h: Add some SSE2 instructions. svn path=/trunk/mono/; revision=41491 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 8 ++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index b830f6e..4e6bf5c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-03-06 Zoltan Varga + + * amd64/amd64-codegen.h: Add some SSE2 instructions. + 2005-02-20 Zoltan Varga * amd64/amd64-codegen.h: Add xadd instructions. 
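For reference, XADD (the instruction behind the new amd64_xadd_* macros) exchanges its two operands and leaves their sum in the destination; with a LOCK prefix it becomes an atomic fetch-and-add. A C model of the unlocked semantics (an illustration, not the emitter):

#include <stdint.h>

/* Model of "xadd dest, src": dest receives dest+src, src receives the old dest. */
static void
xadd_model (int64_t *dest, int64_t *src)
{
	int64_t old_dest = *dest;

	*dest = old_dest + *src;
	*src  = old_dest;
}

Note how the macros route dreg and reg/basereg through amd64_emit_rex so that REX.R/REX.B cover r8-r15, then mask with &0x7 before delegating to the 32-bit x86 emitter.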
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 2a91fbc..ddd9486 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -542,8 +542,12 @@ typedef union { #define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) +#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) + #define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 0) +#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, 0) + #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, 8) #define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) @@ -713,13 +717,13 @@ typedef union { #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0) -#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) +#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) #define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0) #define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0) #define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0) -#define amd64_call_code_size(inst,target,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_code((inst),(target)); } while (0) +#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) //#define amd64_ret_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); } while (0) #define amd64_ret_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); } while (0) #define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); } while (0) -- cgit v1.1 From ee4c2805588b6d8291ac4349a520ca9c99050b59 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 8 Mar 2005 09:28:19 +0000 Subject: 2005-03-08 Zoltan Varga * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert to a 64 bit value. 
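Both conversion opcodes are needed because they round differently: cvtsd2si honors the current MXCSR rounding mode (round-to-nearest by default), while the newly added cvttsd2si always truncates toward zero, which is what a C double-to-integer cast requires. A small model, assuming the default rounding mode:

#include <math.h>
#include <stdio.h>

int
main (void)
{
	double d = 2.7;
	long rounded   = (long) nearbyint (d); /* what cvtsd2si computes */
	long truncated = (long) d;             /* what cvttsd2si computes */

	printf ("%ld %ld\n", rounded, truncated); /* prints: 3 2 */
	return 0;
}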
svn path=/trunk/mono/; revision=41554 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4e6bf5c..4e48d71 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2005-03-08 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert + to a 64 bit value. + 2005-03-06 Zoltan Varga * amd64/amd64-codegen.h: Add some SSE2 instructions. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index ddd9486..de9779b 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -544,9 +544,9 @@ typedef union { #define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) -#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 0) +#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) -#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, 0) +#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, 8) #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, 8) -- cgit v1.1 From ad5014de38c4bde6ef12a04bbbcdf0303ac8acc1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 8 Mar 2005 11:11:38 +0000 Subject: 2005-03-08 Zoltan Varga * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size variants to some sse2 macros. svn path=/trunk/mono/; revision=41557 --- ChangeLog | 3 +++ amd64/amd64-codegen.h | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4e48d71..cdcf06e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,8 @@ 2005-03-08 Zoltan Varga + * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size + variants to some sse2 macros. + * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert to a 64 bit value. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index de9779b..f705aec 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -546,9 +546,13 @@ typedef union { #define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) -#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, 8) +#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) -#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, 8) +#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) + +#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) + +#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) #define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) -- cgit v1.1 From d23ce2f6ba82d598af825e20b95cf7938ff5bc39 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 13 Mar 2005 16:57:42 +0000 Subject: 2005-03-13 Zoltan Varga * amd64/amd64-codegen.h: Remove some unneccesary REXes. 
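The size argument flipping from 0 to 8 above makes emit_sse_reg_reg_size emit REX.W, so the conversion writes a 64-bit general register instead of a 32-bit one. At the byte level the difference is a single 0x48 prefix between the mandatory 0xf2 prefix and the 0x0f 0x2c opcode (encodings shown for illustration):

unsigned char cvt_to_32bit [] = { 0xf2, 0x0f, 0x2c, 0xc1 };       /* cvttsd2si eax, xmm1 */
unsigned char cvt_to_64bit [] = { 0xf2, 0x48, 0x0f, 0x2c, 0xc1 }; /* cvttsd2si rax, xmm1 */

The _size variants introduced just above expose that width choice to the caller instead of hard-coding 8.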
svn path=/trunk/mono/; revision=41765 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index cdcf06e..e13927c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-03-13 Zoltan Varga + + * amd64/amd64-codegen.h: Remove some unneccesary REXes. + 2005-03-08 Zoltan Varga * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index f705aec..3a86bc7 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -699,7 +699,7 @@ typedef union { #define amd64_push_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); } while (0) //#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); } while (0) -#define amd64_push_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_imm((inst),(imm)); } while (0) +#define amd64_push_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); } while (0) //#define amd64_pop_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); } while (0) #define amd64_pop_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); } while (0) #define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); } while (0) @@ -716,7 +716,7 @@ typedef union { #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { x86_jump_code((inst),(target)); } while (0) -#define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_disp((inst),(disp)); } while (0) +#define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0) -- cgit v1.1 From f7074904827b639bb500dcb92c481ec9f35a88a0 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 14 Mar 2005 15:17:54 +0000 Subject: 2005-03-14 Zoltan Varga * amd64/amd64-codegen.h: Add missing AMD64_XMM7. svn path=/trunk/mono/; revision=41795 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index e13927c..dd1625f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-03-14 Zoltan Varga + + * amd64/amd64-codegen.h: Add missing AMD64_XMM7. 
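Both REXes dropped above were dead weight: push imm and jmp rel32 default to 64-bit operand size in long mode, so a REX.W prefix changes nothing. For example (illustrative encodings):

unsigned char push_old [] = { 0x48, 0x6a, 0x2a }; /* rex.w push 42 - redundant prefix */
unsigned char push_new [] = { 0x6a, 0x2a };       /* push 42 - same effect, one byte shorter */

The AMD64_XMM7 addition in the next hunk simply fills a gap in the XMM register enum, which previously jumped from XMM6 to XMM8.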
+ 2005-03-13 Zoltan Varga * amd64/amd64-codegen.h: Remove some unneccesary REXes. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 3a86bc7..7b57ba5 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -45,6 +45,7 @@ typedef enum { AMD64_XMM4 = 4, AMD64_XMM5 = 5, AMD64_XMM6 = 6, + AMD64_XMM7 = 7, AMD64_XMM8 = 8, AMD64_XMM9 = 9, AMD64_XMM10 = 10, -- cgit v1.1 From 242ec30220c85e3f69a1dd1d50469771c4ba7047 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 15 Mar 2005 17:08:39 +0000 Subject: 2005-03-15 Zoltan Varga * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro. svn path=/trunk/mono/; revision=41842 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 3 +++ 2 files changed, 7 insertions(+) diff --git a/ChangeLog b/ChangeLog index dd1625f..727a0f9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-03-15 Zoltan Varga + + * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro. + 2005-03-14 Zoltan Varga * amd64/amd64-codegen.h: Add missing AMD64_XMM7. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 7b57ba5..a00782f 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -74,6 +74,9 @@ typedef enum #define AMD64_CALLEE_SAVED_REGS ((1< Date: Tue, 15 Mar 2005 19:47:29 +0000 Subject: 2005-03-15 Zoltan Varga * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the byte registers. svn path=/trunk/mono/; revision=41848 --- ChangeLog | 3 +++ amd64/amd64-codegen.h | 68 +++------------------------------------------------ 2 files changed, 6 insertions(+), 65 deletions(-) diff --git a/ChangeLog b/ChangeLog index 727a0f9..278035c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,8 @@ 2005-03-15 Zoltan Varga + * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the + byte registers. + * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro. 2005-03-14 Zoltan Varga diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index a00782f..1ecc330 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -74,9 +74,6 @@ typedef enum #define AMD64_CALLEE_SAVED_REGS ((1< 7) ? AMD64_REX_R : 0) | \ (((reg_index) > 7) ? AMD64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if (_amd64_rex_bits != 0) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + if ((_amd64_rex_bits != 0) || ((width == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ } while (0) typedef union { @@ -95,69 +92,10 @@ typedef union { #include "../x86/x86-codegen.h" +/* In 64 bit mode, all registers have a low byte subregister */ #undef X86_IS_BYTE_REG #define X86_IS_BYTE_REG(reg) 1 -/* Need to fill this info in for amd64. 
*/ - -#if 0 -/* -// bitvector mask for callee-saved registers -*/ -#define X86_ESI_MASK (1<> 6) #define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) #define amd64_modrm_rm(modrm) ((modrm) & 0x7) @@ -725,7 +663,7 @@ typedef union { #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0) -#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) +#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) #define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0) #define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0) #define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0) -- cgit v1.1 From 293459dd29bdd85542f499e0530c9504ced01604 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 28 Mar 2005 21:09:11 +0000 Subject: 2005-03-28 Zoltan Varga * amd64/amd64-codegen.h: Avoid emitting a rex in some places. svn path=/trunk/mono/; revision=42316 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index 278035c..5e9b2e5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-03-28 Zoltan Varga + + * amd64/amd64-codegen.h: Avoid emitting a rex in some places. + 2005-03-15 Zoltan Varga * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 1ecc330..88ee5b5 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -82,7 +82,7 @@ typedef enum (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ (((reg_index) > 7) ? AMD64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if ((_amd64_rex_bits != 0) || ((width == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ } while (0) typedef union { @@ -281,7 +281,7 @@ typedef union { do { \ if ((size) == 2) \ *(inst)++ = (unsigned char)0x66; \ - amd64_emit_rex(inst, (size), 0, 0, (basereg)); \ + amd64_emit_rex(inst, (size) == 1 ? 
0 : (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ @@ -538,7 +538,7 @@ typedef union { #define amd64_neg_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); } while (0) #define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_neg_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); } while (0) -#define amd64_nop_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_nop(inst); } while (0) +#define amd64_nop_size(inst,size) do { x86_nop(inst); } while (0) //#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0) #define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); } while (0) #define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) @@ -664,9 +664,9 @@ typedef union { #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0) #define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) -#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0) -#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0) -#define amd64_call_imm_size(inst,disp,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_imm((inst),(disp)); } while (0) +#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0) +#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0) +#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) //#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0) #define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0) #define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) -- cgit v1.1 From 800d43a2433ffc57d904687fdd2b746d5277cab5 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 5 May 2005 12:13:33 +0000 Subject: 2005-05-05 Zoltan Varga * alpha/tramp.c: Applied patch from Jakub Bogusz . 
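The alpha/tramp.c patch that follows fixes stack alignment and argument marshalling in the trampoline generator. The recurring (SIZE & 8) ? SIZE+8 : SIZE expression rounds a frame size that is an odd multiple of 8 up to the next multiple of 16, matching what gcc does on Alpha. A quick self-check of the idiom (align_frame_16 is a hypothetical helper):

#include <assert.h>

static int
align_frame_16 (int size) /* assumes size is already a multiple of 8 */
{
	return (size & 8) ? size + 8 : size;
}

int
main (void)
{
	assert (align_frame_16 (16) == 16);
	assert (align_frame_16 (24) == 32); /* odd multiple of 8 rounds up */
	return 0;
}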
svn path=/trunk/mono/; revision=44078 --- ChangeLog | 4 ++++ alpha/tramp.c | 47 +++++++++++++++++++++++++++++++---------------- 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5e9b2e5..6306938 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-05 Zoltan Varga + + * alpha/tramp.c: Applied patch from Jakub Bogusz . + 2005-03-28 Zoltan Varga * amd64/amd64-codegen.h: Avoid emitting a rex in some places. diff --git a/alpha/tramp.c b/alpha/tramp.c index ee5e94c..7d9fe02 100644 --- a/alpha/tramp.c +++ b/alpha/tramp.c @@ -116,8 +116,8 @@ emit_prolog (guint8 *p, const gint SIZE, int hasthis ) { // 9 instructions. alpha_ldah( p, alpha_gp, alpha_pv, 0 ); - alpha_lda( p, alpha_sp, alpha_sp, -SIZE ); // grow stack down SIZE alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(pv) + alpha_lda( p, alpha_sp, alpha_sp, -((SIZE & 8) ? (SIZE+8) : SIZE) ); // grow stack down SIZE (align to 16 bytes like gcc does) /* TODO: we really don't need to store everything. alpha_a1: We have to store this in order to return the retval. @@ -181,7 +181,7 @@ emit_epilog (guint8 *p, const gint SIZE ) /* restore fp, ra, sp */ alpha_ldq( p, alpha_ra, alpha_sp, SIZE-24 ); alpha_ldq( p, alpha_fp, alpha_sp, SIZE-16 ); - alpha_lda( p, alpha_sp, alpha_sp, SIZE ); + alpha_lda( p, alpha_sp, alpha_sp, ((SIZE & 8) ? (SIZE+8) : SIZE) ); /* return */ alpha_ret( p, alpha_ra, 1 ); @@ -242,7 +242,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) // allocate. buffer = p = malloc(BUFFER_SIZE); memset( buffer, 0, BUFFER_SIZE ); - pos = 0; + pos = 8 * (sig->param_count - alpharegs - 1); // Ok, start creating this thing. p = emit_prolog( p, STACK_SIZE, hasthis ); @@ -254,15 +254,12 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) if( param->byref ) { - if( i > alpharegs ) + if( i >= alpharegs ) { // load into temp register, then store on the stack alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i )); - alpha_stl( p, alpha_t1, alpha_sp, pos ); - pos += 8; - - if( pos > 128 ) - g_error( "Too large." 
); + alpha_stq( p, alpha_t1, alpha_sp, pos ); + pos -= 8; } else { @@ -275,8 +272,8 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) simple_type = param->type; if( simple_type == MONO_TYPE_VALUETYPE ) { - if (sig->ret->data.klass->enumtype) - simple_type = sig->ret->data.klass->enum_basetype->type; + if (param->data.klass->enumtype) + simple_type = param->data.klass->enum_basetype->type; } switch (simple_type) @@ -291,20 +288,35 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) case MONO_TYPE_U2: case MONO_TYPE_I4: case MONO_TYPE_U4: + // 4 bytes - need to sign-extend (stackvals are not extended) + if( i >= alpharegs ) + { + // load into temp register, then store on the stack + alpha_ldl( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); + alpha_stq( p, alpha_t1, alpha_sp, pos ); + pos -= 8; + } + else + { + // load into register + alpha_ldl( p, regbase + i, alpha_t0, ARG_LOC(i) ); + } + break; case MONO_TYPE_I: case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: + case MONO_TYPE_SZARRAY: case MONO_TYPE_STRING: case MONO_TYPE_I8: // 8 bytes - if( i > alpharegs ) + if( i >= alpharegs ) { // load into temp register, then store on the stack alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); alpha_stq( p, alpha_t1, alpha_sp, pos ); - pos += 8; + pos -= 8; } else { @@ -321,7 +333,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); alpha_cpys( p, alpha_ft1, alpha_ft1, alpha_ft2 ); alpha_stt( p, alpha_ft2, alpha_sp, pos ); - pos += 8; + pos -= 8; } else { @@ -334,7 +346,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) g_error ("Not implemented: ValueType as parameter to delegate." ); break; default: - g_error( "Not implemented." ); + g_error( "Not implemented: 0x%x.", simple_type ); break; } } @@ -346,7 +358,10 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) p = emit_epilog( p, STACK_SIZE ); if( p > buffer + BUFFER_SIZE ) - g_error( "Buffer overflow." ); + g_error( "Buffer overflow: got 0x%lx, expected <=0x%x.", (long)(p-buffer), BUFFER_SIZE ); + + /* flush instruction cache to see trampoline code */ + asm volatile("imb":::"memory"); return (MonoPIFunc)buffer; } -- cgit v1.1 From 82a68f6e85fbc7aaa7832584b2f51953871f1390 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 14 May 2005 17:35:42 +0000 Subject: 2005-05-14 Zoltan Varga * ia64/ia64-codegen.h: Add IA64 code generation macros. * Makefile.am: Add ia64 subdir. svn path=/trunk/mono/; revision=44523 --- ChangeLog | 6 + Makefile.am | 2 +- ia64/Makefile.am | 0 ia64/codegen.c | 728 ++++++++++++++++++ ia64/ia64-codegen.h | 2024 +++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 2759 insertions(+), 1 deletion(-) create mode 100644 ia64/Makefile.am create mode 100644 ia64/codegen.c create mode 100644 ia64/ia64-codegen.h diff --git a/ChangeLog b/ChangeLog index 6306938..8122045 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2005-05-14 Zoltan Varga + + * ia64/ia64-codegen.h: Add IA64 code generation macros. + + * Makefile.am: Add ia64 subdir. + 2005-05-05 Zoltan Varga * alpha/tramp.c: Applied patch from Jakub Bogusz . 
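Two details of the trampoline patch above deserve a note. First, Alpha's ldl sign-extends the loaded 32-bit value into the full 64-bit register, which is why the 4-byte integer cases now load with ldl before storing an 8-byte stack slot; stackvals arrive unextended, so the explicit load/store pair does the widening. Second, the closing asm volatile("imb") flushes the instruction cache so the freshly written trampoline is actually visible to the CPU. A C model of the load behavior (ldl_model is illustrative):

#include <stdint.h>

/* ldl loads 32 bits and sign-extends; Alpha has no zero-extending
 * 32-bit load, so unsigned ints travel sign-extended as well. */
static int64_t
ldl_model (const int32_t *addr)
{
	return (int64_t) *addr;
}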
diff --git a/Makefile.am b/Makefile.am index 9eb9ee8..57c353d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ SUBDIRS = $(arch_target) -DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 +DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/ia64/Makefile.am b/ia64/Makefile.am new file mode 100644 index 0000000..e69de29 diff --git a/ia64/codegen.c b/ia64/codegen.c new file mode 100644 index 0000000..3f9e60d --- /dev/null +++ b/ia64/codegen.c @@ -0,0 +1,728 @@ +/* + * codegen.c: Tests for the IA64 code generation macros + */ + +#include +#include +#include + +void +mono_disassemble_code (guint8 *code, int size, char *id) +{ + int i; + FILE *ofd; + const char *tmp = g_get_tmp_dir (); + const char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS"); + char *as_file; + char *o_file; + char *cmd; + + as_file = g_strdup_printf ("%s/test.s", tmp); + + if (!(ofd = fopen (as_file, "w"))) + g_assert_not_reached (); + + for (i = 0; id [i]; ++i) { + if (!isalnum (id [i])) + fprintf (ofd, "_"); + else + fprintf (ofd, "%c", id [i]); + } + fprintf (ofd, ":\n"); + + for (i = 0; i < size; ++i) + fprintf (ofd, ".byte %d\n", (unsigned int) code [i]); + + fclose (ofd); + +#define DIS_CMD "ia64-linux-gnu-objdump -d" +#define AS_CMD "ia64-linux-gnu-as" + + o_file = g_strdup_printf ("%s/test.o", tmp); + cmd = g_strdup_printf (AS_CMD " %s -o %s", as_file, o_file); + system (cmd); + g_free (cmd); + if (!objdump_args) + objdump_args = ""; + + cmd = g_strdup_printf (DIS_CMD " %s %s", objdump_args, o_file); + system (cmd); + g_free (cmd); + + g_free (o_file); + g_free (as_file); +} + +int +main () +{ + Ia64CodegenState code; + + guint8 *buf = g_malloc0 (40960); + + ia64_codegen_init (code, buf); + + ia64_add (code, 1, 2, 3); + ia64_add1 (code, 1, 2, 3); + ia64_sub (code, 1, 2, 3); + ia64_sub1 (code, 1, 2, 3); + ia64_addp4 (code, 1, 2, 3); + ia64_and (code, 1, 2, 3); + ia64_andcm (code, 1, 2, 3); + ia64_or (code, 1, 2, 3); + ia64_xor (code, 1, 2, 3); + ia64_shladd (code, 1, 2, 3, 4); + ia64_shladdp4 (code, 1, 2, 3, 4); + ia64_sub_imm (code, 1, 0x7f, 2); + ia64_sub_imm (code, 1, -1, 2); + ia64_and_imm (code, 1, -128, 2); + ia64_andcm_imm (code, 1, -128, 2); + ia64_or_imm (code, 1, -128, 2); + ia64_xor_imm (code, 1, -128, 2); + ia64_adds_imm (code, 1, 8191, 2); + ia64_adds_imm (code, 1, -8192, 2); + ia64_adds_imm (code, 1, 1234, 2); + ia64_adds_imm (code, 1, -1234, 2); + ia64_addp4_imm (code, 1, -1234, 2); + ia64_addl_imm (code, 1, 1234, 2); + ia64_addl_imm (code, 1, -1234, 2); + ia64_addl_imm (code, 1, 2097151, 2); + ia64_addl_imm (code, 1, -2097152, 2); + + ia64_cmp_lt (code, 1, 2, 1, 2); + ia64_cmp_ltu (code, 1, 2, 1, 2); + ia64_cmp_eq (code, 1, 2, 1, 2); + ia64_cmp_lt_unc (code, 1, 2, 1, 2); + ia64_cmp_ltu_unc (code, 1, 2, 1, 2); + ia64_cmp_eq_unc (code, 1, 2, 1, 2); + ia64_cmp_eq_and (code, 1, 2, 1, 2); + ia64_cmp_eq_or (code, 1, 2, 1, 2); + ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2); + ia64_cmp_ne_and (code, 1, 2, 1, 2); + ia64_cmp_ne_or (code, 1, 2, 1, 2); + ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2); + + ia64_cmp4_lt (code, 1, 2, 1, 2); + ia64_cmp4_ltu (code, 1, 2, 1, 2); + ia64_cmp4_eq (code, 1, 2, 1, 2); + ia64_cmp4_lt_unc (code, 1, 2, 1, 2); + ia64_cmp4_ltu_unc (code, 1, 2, 1, 2); + ia64_cmp4_eq_unc (code, 1, 2, 1, 2); + ia64_cmp4_eq_and (code, 1, 2, 1, 2); + ia64_cmp4_eq_or (code, 1, 2, 1, 2); + ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2); + ia64_cmp4_ne_and (code, 1, 2, 1, 2); + ia64_cmp4_ne_or (code, 1, 2, 1, 2); + 
ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2); + + ia64_cmp_gt_and (code, 1, 2, 0, 2); + ia64_cmp_gt_or (code, 1, 2, 0, 2); + ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2); + ia64_cmp_le_and (code, 1, 2, 0, 2); + ia64_cmp_le_or (code, 1, 2, 0, 2); + ia64_cmp_le_or_andcm (code, 1, 2, 0, 2); + ia64_cmp_ge_and (code, 1, 2, 0, 2); + ia64_cmp_ge_or (code, 1, 2, 0, 2); + ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2); + ia64_cmp_lt_and (code, 1, 2, 0, 2); + ia64_cmp_lt_or (code, 1, 2, 0, 2); + ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2); + + ia64_cmp4_gt_and (code, 1, 2, 0, 2); + ia64_cmp4_gt_or (code, 1, 2, 0, 2); + ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2); + ia64_cmp4_le_and (code, 1, 2, 0, 2); + ia64_cmp4_le_or (code, 1, 2, 0, 2); + ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2); + ia64_cmp4_ge_and (code, 1, 2, 0, 2); + ia64_cmp4_ge_or (code, 1, 2, 0, 2); + ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2); + ia64_cmp4_lt_and (code, 1, 2, 0, 2); + ia64_cmp4_lt_or (code, 1, 2, 0, 2); + ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2); + + ia64_cmp_lt_imm (code, 1, 2, 127, 2); + ia64_cmp_lt_imm (code, 1, 2, -128, 2); + + ia64_cmp_lt_imm (code, 1, 2, -128, 2); + ia64_cmp_ltu_imm (code, 1, 2, -128, 2); + ia64_cmp_eq_imm (code, 1, 2, -128, 2); + ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2); + ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2); + ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2); + ia64_cmp_eq_and_imm (code, 1, 2, -128, 2); + ia64_cmp_eq_or_imm (code, 1, 2, -128, 2); + ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2); + ia64_cmp_ne_and_imm (code, 1, 2, -128, 2); + ia64_cmp_ne_or_imm (code, 1, 2, -128, 2); + ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2); + + ia64_cmp4_lt_imm (code, 1, 2, -128, 2); + ia64_cmp4_ltu_imm (code, 1, 2, -128, 2); + ia64_cmp4_eq_imm (code, 1, 2, -128, 2); + ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2); + ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2); + ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2); + ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2); + ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2); + ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2); + ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2); + ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2); + ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2); + + ia64_padd1 (code, 1, 2, 3); + ia64_padd2 (code, 1, 2, 3); + ia64_padd4 (code, 1, 2, 3); + ia64_padd1_sss (code, 1, 2, 3); + ia64_padd2_sss (code, 1, 2, 3); + ia64_padd1_uuu (code, 1, 2, 3); + ia64_padd2_uuu (code, 1, 2, 3); + ia64_padd1_uus (code, 1, 2, 3); + ia64_padd2_uus (code, 1, 2, 3); + + ia64_psub1 (code, 1, 2, 3); + ia64_psub2 (code, 1, 2, 3); + ia64_psub4 (code, 1, 2, 3); + ia64_psub1_sss (code, 1, 2, 3); + ia64_psub2_sss (code, 1, 2, 3); + ia64_psub1_uuu (code, 1, 2, 3); + ia64_psub2_uuu (code, 1, 2, 3); + ia64_psub1_uus (code, 1, 2, 3); + ia64_psub2_uus (code, 1, 2, 3); + + ia64_pavg1 (code, 1, 2, 3); + ia64_pavg2 (code, 1, 2, 3); + ia64_pavg1_raz (code, 1, 2, 3); + ia64_pavg2_raz (code, 1, 2, 3); + ia64_pavgsub1 (code, 1, 2, 3); + ia64_pavgsub2 (code, 1, 2, 3); + ia64_pcmp1_eq (code, 1, 2, 3); + ia64_pcmp2_eq (code, 1, 2, 3); + ia64_pcmp4_eq (code, 1, 2, 3); + ia64_pcmp1_gt (code, 1, 2, 3); + ia64_pcmp2_gt (code, 1, 2, 3); + ia64_pcmp4_gt (code, 1, 2, 3); + + ia64_pshladd2 (code, 1, 2, 3, 4); + ia64_pshradd2 (code, 1, 2, 3, 4); + + ia64_pmpyshr2 (code, 1, 2, 3, 0); + ia64_pmpyshr2_u (code, 1, 2, 3, 0); + ia64_pmpyshr2 (code, 1, 2, 3, 7); + ia64_pmpyshr2_u (code, 1, 2, 3, 7); + ia64_pmpyshr2 (code, 1, 2, 3, 15); + ia64_pmpyshr2_u (code, 1, 2, 3, 15); + ia64_pmpyshr2 (code, 1, 2, 3, 16); + ia64_pmpyshr2_u (code, 1, 2, 3, 16); + + ia64_pmpy2_r (code, 
1, 2, 3); + ia64_pmpy2_l (code, 1, 2, 3); + ia64_mix1_r (code, 1, 2, 3); + ia64_mix2_r (code, 1, 2, 3); + ia64_mix4_r (code, 1, 2, 3); + ia64_mix1_l (code, 1, 2, 3); + ia64_mix2_l (code, 1, 2, 3); + ia64_mix4_l (code, 1, 2, 3); + ia64_pack2_uss (code, 1, 2, 3); + ia64_pack2_sss (code, 1, 2, 3); + ia64_pack4_sss (code, 1, 2, 3); + ia64_unpack1_h (code, 1, 2, 3); + ia64_unpack2_h (code, 1, 2, 3); + ia64_unpack4_h (code, 1, 2, 3); + ia64_unpack1_l (code, 1, 2, 3); + ia64_unpack2_l (code, 1, 2, 3); + ia64_unpack4_l (code, 1, 2, 3); + ia64_pmin1_u (code, 1, 2, 3); + ia64_pmax1_u (code, 1, 2, 3); + ia64_pmin2 (code, 1, 2, 3); + ia64_pmax2 (code, 1, 2, 3); + ia64_psad1 (code, 1, 2, 3); + + ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST); + ia64_mux1 (code, 1, 2, IA64_MUX1_MIX); + ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF); + ia64_mux1 (code, 1, 2, IA64_MUX1_ALT); + ia64_mux1 (code, 1, 2, IA64_MUX1_REV); + + ia64_mux2 (code, 1, 2, 0x8d); + + ia64_pshr2 (code, 1, 2, 3); + ia64_pshr4 (code, 1, 2, 3); + ia64_shr (code, 1, 2, 3); + ia64_pshr2_u (code, 1, 2, 3); + ia64_pshr4_u (code, 1, 2, 3); + ia64_shr_u (code, 1, 2, 3); + + ia64_pshr2_imm (code, 1, 2, 20); + ia64_pshr4_imm (code, 1, 2, 20); + ia64_pshr2_u_imm (code, 1, 2, 20); + ia64_pshr4_u_imm (code, 1, 2, 20); + + ia64_pshl2 (code, 1, 2, 3); + ia64_pshl4 (code, 1, 2, 3); + ia64_shl (code, 1, 2, 3); + + ia64_pshl2_imm (code, 1, 2, 20); + ia64_pshl4_imm (code, 1, 2, 20); + + ia64_popcnt (code, 1, 2); + + ia64_shrp (code, 1, 2, 3, 62); + + ia64_extr_u (code, 1, 2, 62, 61); + ia64_extr (code, 1, 2, 62, 61); + + ia64_dep_z (code, 1, 2, 62, 61); + + ia64_dep_z_imm (code, 1, 127, 62, 61); + ia64_dep_z_imm (code, 1, -128, 62, 61); + ia64_dep_imm (code, 1, 0, 2, 62, 61); + ia64_dep_imm (code, 1, -1, 2, 62, 61); + ia64_dep (code, 1, 2, 3, 10, 15); + + ia64_tbit_z (code, 1, 2, 3, 0); + + ia64_tbit_z (code, 1, 2, 3, 63); + ia64_tbit_z_unc (code, 1, 2, 3, 63); + ia64_tbit_z_and (code, 1, 2, 3, 63); + ia64_tbit_nz_and (code, 1, 2, 3, 63); + ia64_tbit_z_or (code, 1, 2, 3, 63); + ia64_tbit_nz_or (code, 1, 2, 3, 63); + ia64_tbit_z_or_andcm (code, 1, 2, 3, 63); + ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63); + + ia64_tnat_z (code, 1, 2, 3); + ia64_tnat_z_unc (code, 1, 2, 3); + ia64_tnat_z_and (code, 1, 2, 3); + ia64_tnat_nz_and (code, 1, 2, 3); + ia64_tnat_z_or (code, 1, 2, 3); + ia64_tnat_nz_or (code, 1, 2, 3); + ia64_tnat_z_or_andcm (code, 1, 2, 3); + ia64_tnat_nz_or_andcm (code, 1, 2, 3); + + ia64_nop_i (code, 0x1234); + ia64_hint_i (code, 0x1234); + + ia64_break_i (code, 0x1234); + + ia64_chk_s_i (code, 1, 0); + ia64_chk_s_i (code, 1, -1); + ia64_chk_s_i (code, 1, 1); + + ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); + ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); + ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); + ia64_mov_ret_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + + ia64_mov_from_breg (code, 1, 1); + + ia64_mov_to_pred (code, 1, 0xfe); + + ia64_mov_to_pred_rot_imm (code, 0xff0000); + + ia64_mov_from_ip (code, 1); + ia64_mov_from_pred (code, 1); + + ia64_mov_to_ar_i (code, 1, 1); + + ia64_mov_to_ar_imm_i (code, 1, 127); + + ia64_mov_from_ar_i (code, 1, 1); + + ia64_zxt1 (code, 1, 2); + ia64_zxt2 (code, 1, 2); + ia64_zxt4 (code, 1, 2); + ia64_sxt1 (code, 1, 2); + ia64_sxt2 (code, 1, 2); + ia64_sxt4 (code, 1, 2); + + ia64_czx1_l (code, 1, 2); + ia64_czx2_l (code, 1, 2); + ia64_czx1_r (code, 1, 2); + ia64_czx2_r (code, 1, 2); + + 
ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE); + ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1); + ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA); + + ia64_ld1_hint (code, 1, 2, 0); + ia64_ld2_hint (code, 1, 2, 0); + ia64_ld4_hint (code, 1, 2, 0); + ia64_ld8_hint (code, 1, 2, 0); + + ia64_ld1_s_hint (code, 1, 2, 0); + ia64_ld2_s_hint (code, 1, 2, 0); + ia64_ld4_s_hint (code, 1, 2, 0); + ia64_ld8_s_hint (code, 1, 2, 0); + + ia64_ld1_a_hint (code, 1, 2, 0); + ia64_ld2_a_hint (code, 1, 2, 0); + ia64_ld4_a_hint (code, 1, 2, 0); + ia64_ld8_a_hint (code, 1, 2, 0); + + ia64_ld1_sa_hint (code, 1, 2, 0); + ia64_ld2_sa_hint (code, 1, 2, 0); + ia64_ld4_sa_hint (code, 1, 2, 0); + ia64_ld8_sa_hint (code, 1, 2, 0); + + ia64_ld1_bias_hint (code, 1, 2, 0); + ia64_ld2_bias_hint (code, 1, 2, 0); + ia64_ld4_bias_hint (code, 1, 2, 0); + ia64_ld8_bias_hint (code, 1, 2, 0); + + ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE); + + ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE); + ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE); + + ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA); + + ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE); + + ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); + + ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE); + + ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE); + ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); + + ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + + ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + + ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); + + ia64_ldfs_hint (code, 1, 2, 0); + ia64_ldfd_hint (code, 1, 2, 0); + ia64_ldf8_hint (code, 1, 2, 0); + ia64_ldfe_hint (code, 1, 2, 0); + + ia64_ldfs_s_hint (code, 1, 2, 0); + ia64_ldfd_s_hint (code, 1, 2, 0); + ia64_ldf8_s_hint (code, 1, 2, 0); + ia64_ldfe_s_hint (code, 1, 2, 0); + + ia64_ldfs_a_hint (code, 1, 2, 0); + ia64_ldfd_a_hint (code, 1, 2, 0); + ia64_ldf8_a_hint (code, 1, 2, 0); + ia64_ldfe_a_hint (code, 1, 2, 0); + + ia64_ldfs_sa_hint (code, 1, 2, 0); + ia64_ldfd_sa_hint (code, 1, 2, 0); + ia64_ldf8_sa_hint (code, 1, 2, 0); + ia64_ldfe_sa_hint (code, 1, 2, 0); + + ia64_ldfs_c_clr_hint (code, 1, 2, 0); + ia64_ldfd_c_clr_hint (code, 1, 2, 0); + ia64_ldf8_c_clr_hint (code, 1, 2, 0); + ia64_ldfe_c_clr_hint (code, 1, 2, 0); + + ia64_ldfs_c_nc_hint (code, 1, 2, 0); + ia64_ldfd_c_nc_hint (code, 1, 2, 0); + ia64_ldf8_c_nc_hint (code, 1, 2, 0); + ia64_ldfe_c_nc_hint (code, 1, 2, 0); + + ia64_ldf_fill_hint (code, 1, 2, 0); + + ia64_ldfs_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0); + + 
ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_sa_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0); + ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0); + ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0); + ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0); + + ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0); + ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_stfs_hint (code, 1, 2, 0); + ia64_stfd_hint (code, 1, 2, 0); + ia64_stf8_hint (code, 1, 2, 0); + ia64_stfe_hint (code, 1, 2, 0); + + ia64_stf_spill_hint (code, 1, 2, 0); + + ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0); + ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0); + ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0); + ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0); + + ia64_ldfps_hint (code, 1, 2, 3, 0); + ia64_ldfpd_hint (code, 1, 2, 3, 0); + ia64_ldfp8_hint (code, 1, 2, 3, 0); + + ia64_ldfps_s_hint (code, 1, 2, 3, 0); + ia64_ldfpd_s_hint (code, 1, 2, 3, 0); + ia64_ldfp8_s_hint (code, 1, 2, 3, 0); + + ia64_ldfps_a_hint (code, 1, 2, 3, 0); + ia64_ldfpd_a_hint (code, 1, 2, 3, 0); + ia64_ldfp8_a_hint (code, 1, 2, 3, 0); + + ia64_ldfps_sa_hint (code, 1, 2, 3, 0); + ia64_ldfpd_sa_hint (code, 1, 2, 3, 0); + ia64_ldfp8_sa_hint (code, 1, 2, 3, 0); + + ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0); + ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0); + ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0); + + ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_inc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_a_inc_hint (code, 1, 2, 
3, 0); + ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0); + + ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0); + ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0); + ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0); + + ia64_lfetch_hint (code, 1, 0); + ia64_lfetch_excl_hint (code, 1, 0); + ia64_lfetch_fault_hint (code, 1, 0); + ia64_lfetch_fault_excl_hint (code, 1, 0); + + ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1); + ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2); + ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA); + + ia64_lfetch_inc_hint (code, 1, 2, 0); + ia64_lfetch_excl_inc_hint (code, 1, 2, 0); + ia64_lfetch_fault_inc_hint (code, 1, 2, 0); + ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0); + + ia64_lfetch_inc_imm_hint (code, 1, 255, 0); + ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0); + ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0); + ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0); + + ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0); + ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0); + ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0); + ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0); + ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0); + ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0); + ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0); + ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0); + ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0); + ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0); + ia64_xchg1_hint (code, 1, 2, 3, 0); + ia64_xchg2_hint (code, 1, 2, 3, 0); + ia64_xchg4_hint (code, 1, 2, 3, 0); + ia64_xchg8_hint (code, 1, 2, 3, 0); + + ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0); + ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0); + + ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0); + ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0); + ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0); + ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0); + + ia64_setf_sig (code, 1, 2); + ia64_setf_exp (code, 1, 2); + ia64_setf_s (code, 1, 2); + ia64_setf_d (code, 1, 2); + + ia64_getf_sig (code, 1, 2); + ia64_getf_exp (code, 1, 2); + ia64_getf_s (code, 1, 2); + ia64_getf_d (code, 1, 2); + + ia64_chk_s_m (code, 1, 0); + ia64_chk_s_m (code, 1, 1); + ia64_chk_s_m (code, 1, -1); + + ia64_chk_s_float_m (code, 1, 0); + + ia64_chk_a_nc (code, 1, 0); + ia64_chk_a_nc (code, 1, 1); + ia64_chk_a_nc (code, 1, -1); + + ia64_chk_a_nc (code, 1, 0); + ia64_chk_a_clr (code, 1, 0); + + ia64_chk_a_nc_float (code, 1, 0); + ia64_chk_a_clr_float (code, 1, 0); + + ia64_invala (code); + ia64_fwb (code); + ia64_mf (code); + ia64_mf_a (code); + ia64_srlz_d (code); + ia64_stlz_i (code); + ia64_sync_i (code); + + ia64_flushrs (code); + ia64_loadrs (code); + + ia64_invala_e (code, 1); + ia64_invala_e_float (code, 1); + + ia64_fc (code, 1); + ia64_fc_i (code, 1); + + ia64_mov_to_ar_m (code, 1, 1); + + ia64_mov_to_ar_imm_m (code, 1, 127); + + ia64_mov_from_ar_m (code, 1, 1); + + ia64_mov_to_cr (code, 1, 2); + + ia64_mov_from_cr (code, 1, 2); + + ia64_alloc (code, 1, 3, 4, 5, 0); + ia64_alloc (code, 1, 3, 4, 5, 8); + + ia64_mov_to_psr_l (code, 1); + ia64_mov_to_psr_um (code, 
1); + + ia64_mov_from_psr (code, 1); + ia64_mov_from_psr_um (code, 1); + + ia64_break_m (code, 0x1234); + ia64_nop_m (code, 0x1234); + ia64_hint_m (code, 0x1234); + + ia64_br_cond_hint (code, 0, 0, 0, 0); + ia64_br_wexit_hint (code, 0, 0, 0, 0); + ia64_br_wtop_hint (code, 0, 0, 0, 0); + + ia64_br_cloop_hint (code, 0, 0, 0, 0); + ia64_br_cexit_hint (code, 0, 0, 0, 0); + ia64_br_ctop_hint (code, 0, 0, 0, 0); + + ia64_br_call_hint (code, 1, 0, 0, 0, 0); + + ia64_br_cond_reg_hint (code, 1, 0, 0, 0); + ia64_br_ia_reg_hint (code, 1, 0, 0, 0); + ia64_br_ret_reg_hint (code, 1, 0, 0, 0); + + ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0); + + ia64_cover (code); + ia64_clrrrb (code); + ia64_clrrrb_pr (code); + ia64_rfi (code); + ia64_bsw_0 (code); + ia64_bsw_1 (code); + ia64_epc (code); + + ia64_break_b (code, 0x1234); + ia64_nop_b (code, 0x1234); + ia64_hint_b (code, 0x1234); + + ia64_break_x (code, 0x2123456789ABCDEFULL); + + ia64_movl (code, 1, 0x123456789ABCDEF0LL); + + ia64_brl_cond_hint (code, 0, 0, 0, 0); + ia64_brl_cond_hint (code, -1, 0, 0, 0); + + ia64_brl_call_hint (code, 1, 0, 0, 0, 0); + ia64_brl_call_hint (code, 1, -1, 0, 0, 0); + + ia64_nop_x (code, 0x2123456789ABCDEFULL); + ia64_hint_x (code, 0x2123456789ABCDEFULL); + + ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL); + + ia64_codegen_close (code); + + mono_disassemble_code (buf, 40960, "code"); + + return 0; +} diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h new file mode 100644 index 0000000..7f2955f --- /dev/null +++ b/ia64/ia64-codegen.h @@ -0,0 +1,2024 @@ +/* + * ia64-codegen.h: Macros for generating ia64 code + * + * Authors: + * Zoltan Varga (vargaz@gmail.com) + * + * (C) 2005 Novell, Inc. + */ + +#ifndef _IA64_CODEGEN_H_ +#define _IA64_CODEGEN_H_ + +#include + +typedef enum { + IA64_INS_TYPE_A, + IA64_INS_TYPE_I, + IA64_INS_TYPE_M, + IA64_INS_TYPE_F, + IA64_INS_TYPE_B, + IA64_INS_TYPE_LX +} Ia64InsType; + +typedef enum { + IA64_TEMPLATE_MII = 0x00, + IA64_TEMPLATE_MIIS = 0x01, + IA64_TEMPLATE_MISI = 0x02, + IA64_TEMPLATE_MISIS = 0x03, + IA64_TEMPLATE_MLX = 0x04, + IA64_TEMPLATE_MLXS = 0x05, + IA64_TEMPLATE_UNUS1 = 0x06, + IA64_TEMPLATE_UNUS2 = 0x07, + IA64_TEMPLATE_MMI = 0x08, + IA64_TEMPLATE_MMIS = 0x09, + IA64_TEMPLATE_MSMI = 0x0A, + IA64_TEMPLATE_MSMIS = 0x0B, + IA64_TEMPLATE_MFI = 0x0C, + IA64_TEMPLATE_MFIS = 0x0D, + IA64_TEMPLATE_MMF = 0x0E, + IA64_TEMPLATE_MMFS = 0x0F, + IA64_TEMPLATE_MIB = 0x10, + IA64_TEMPLATE_MIBS = 0x11, + IA64_TEMPLATE_MBB = 0x12, + IA64_TEMPLATE_MBBS = 0x13, + IA64_TEMPLATE_UNUS3 = 0x14, + IA64_TEMPLATE_UNUS4 = 0x15, + IA64_TEMPLATE_BBB = 0x16, + IA64_TEMPLATE_BBBS = 0x17, + IA64_TEMPLATE_MMB = 0x18, + IA64_TEMPLATE_MMBS = 0x19, + IA64_TEMPLATE_UNUS5 = 0x1A, + IA64_TEMPLATE_UNUS6 = 0x1B, + IA64_TEMPLATE_MFB = 0x1C, + IA64_TEMPLATE_MFBS = 0x1D, + IA64_TEMPLATE_UNUS7 = 0x1E, + IA64_TEMPLATE_UNUS8 = 0x1F, +} Ia64BundleTemplate; + +#define IA64_NOP_I ((0x01 << 27)) +#define IA64_NOP_M ((0x01 << 27)) + +/* + * IA64 code cannot be emitted in the same way as code on other processors, + * since 3 instructions are combined into a bundle. This structure keeps track + * of already emitted instructions. + * + */ + +typedef struct { + guint8 *buf; + guint64 instructions [3]; + int itypes [3], stops [3]; + int nins; +} Ia64CodegenState; + +static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); + +/* + * FIXME: + * + * In order to simplify things, we emit a stop after every instruction for + * now. Also, we emit 1 ins + 2 nops. 
+ */ + +#define ia64_codegen_init(code, buf) do { \ + code.buf = buf; \ + code.nins = 0; \ +} while (0) + +#define ia64_codegen_close(code) do { \ + ia64_emit_bundle (&code, TRUE); \ +} while (0) + +#define ia64_begin_bundle(code) do { \ + ia64_emit_bundle (&code, TRUE); \ +} while (0) + +#define ia64_emit_ins(code, itype, ins) do { \ + code.instructions [code.nins] = ins; \ + code.itypes [code.nins] = itype; \ + code.stops [code.nins] = 1; \ + code.nins ++; \ + if (code.nins == 3) \ + ia64_emit_bundle (&code, FALSE); \ +} while (0) + +#if G_BYTE_ORDER != G_LITTLE_ENDIAN +#error "FIXME" +#endif + +#define ia64_emit_bundle_template(code, template, i1, i2, i3) do { \ + guint64 dw1, dw2; \ + dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \ + dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \ + ((guint64*)code->buf)[0] = dw1; \ + ((guint64*)code->buf)[1] = dw2; \ + code->buf += 16; \ +} while (0) + +static void +ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) +{ + int i, template; + guint64 i1, i2, i3; + + for (i = 0; i < code->nins; ++i) { + switch (code->itypes [i]) { + case IA64_INS_TYPE_A: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I); + break; + case IA64_INS_TYPE_I: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I); + break; + case IA64_INS_TYPE_M: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I); + break; + case IA64_INS_TYPE_B: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [i]); + break; + case IA64_INS_TYPE_LX: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [i], code->instructions [i + 1]); + i ++; + break; + default: + g_assert_not_reached (); + } + } + + code->nins = 0; +} + +#if 1 + +#define check_assert(cond) g_assert((cond)) + +#else + +#define check_assert(cond) + +#endif + +#define check_greg(gr) check_assert ((guint64)(gr) < 128) + +#define check_freg(fr) check_assert ((guint64)(fr) < 128) + +#define check_preg(pr) check_assert ((guint64)(pr) < 64) + +#define check_breg(pr) check_assert ((guint64)(pr) < 8) + +#define check_count2(count) check_assert (((count) >= 1) && ((count) <= 4)) + +#define check_count5(count) check_assert (((count) >= 0) && ((count) < 32)) + +#define check_count6(count) check_assert (((count) >= 0) && ((count) < 64)) + +#define check_imm1(imm) check_assert (((gint64)(imm) >= -1) && ((gint64)(imm) <= 0)) +#define check_imm3(imm) check_assert (((gint64)(imm) >= -4) && ((gint64)(imm) <= 3)) +#define check_imm8(imm) check_assert (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127)) +#define check_imm9(imm) check_assert (((gint64)(imm) >= -256) && ((gint64)(imm) <= 255)) +#define check_imm14(imm) check_assert (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) +#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1))) +#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x400000) && ((gint64)(imm) <= (0x400000 - 1))) +#define check_imm62(imm) check_assert (((gint64)(imm) >= -0x2fffffffffffffffLL) && ((gint64)(imm) <= (0x2fffffffffffffffLL - 1))) + +#define check_len4(len) check_assert (((gint64)(len) >= 1) && ((gint64)(len) <= 16)) + +#define check_bwh(bwh) check_assert ((bwh) >= 0 && (bwh) <= IA64_BWH_DPNT) + +#define check_ph(ph) check_assert ((ph) >= 0 && (ph) <= IA64_PH_MANY) + 
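Putting the bundle machinery above together: an IA64 bundle is 128 bits, a 5-bit template plus three 41-bit instruction slots (5 + 3*41 = 128), which is exactly what ia64_emit_bundle_template packs into the two 64-bit words dw1 and dw2. A usage sketch in the style of codegen.c (it needs this header and glib, like the test program):

#include <glib.h>
#include "ia64-codegen.h"

int
main (void)
{
	Ia64CodegenState code;
	guint8 *buf = g_malloc0 (4096);

	ia64_codegen_init (code, buf);
	ia64_add (code, 1, 2, 3);  /* r1 = r2 + r3, padded with two nops for now */
	ia64_codegen_close (code); /* flush the partially filled bundle */

	return 0;
}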
+#define check_dh(dh) check_assert ((dh) >= 0 && (dh) <= IA64_DH_CLR) + +#define check_gregs(r1,r2,r3) do { check_greg ((r1)); check_greg ((r2)); check_greg ((r3)); } while (0) + +#define check_pregs(p1,p2) do { check_preg ((p1)); check_preg ((p2)); } while (0) + +#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0) + +#define ia64_emit_ins_1(code,itype,f1,o1) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)))) + +#define ia64_emit_ins_3(code,itype,f1,o1,f2,o2,f3,o3) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)))) + +#define ia64_emit_ins_5(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)))) + +#define ia64_emit_ins_6(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)))) + +#define ia64_emit_ins_7(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)))) + +#define ia64_emit_ins_8(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)))) + +#define ia64_emit_ins_9(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)))) + +#define ia64_emit_ins_10(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)))) + +#define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11)))) + +#define ia64_a1(code2, qp, r1, r2, r3, x2a, ve, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) + +#define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0) +#define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1) +#define ia64_sub_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 1) +#define ia64_sub1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 0) +#define ia64_addp4_pred(code, qp, r1, r2, 
r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 2, 0) +#define ia64_and_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 0) +#define ia64_andcm_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 1) +#define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2) +#define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3) + +#define ia64_a2(code2, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) + +#define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count)) +#define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count)) + +#define ia64_a3(code2, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { check_greg ((r1)); check_greg ((r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0) + +#define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1) +#define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0) +#define ia64_andcm_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 1) +#define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2) +#define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3) + +#define ia64_a4(code2, qp, r1, imm14, r3, x2a, ve) do { check_greg ((r1)); check_greg ((r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0) + +#define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0) +#define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0) + +#define ia64_a5(code2, qp, r1, imm, r3) do { check_greg ((r1)); check_greg ((r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0) + +#define ia64_addl_imm_pred(code, qp,r1,imm22,r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3)) + +#define ia64_a6(code2, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) + +#define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0) +#define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0) +#define ia64_cmp_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 0) +#define ia64_cmp_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 1) +#define ia64_cmp_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 
0, 1)
+#define ia64_cmp_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 1)
+#define ia64_cmp_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 0)
+#define ia64_cmp_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 1)
+
+#define ia64_cmp4_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 0)
+#define ia64_cmp4_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 0)
+#define ia64_cmp4_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 0)
+#define ia64_cmp4_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 1)
+#define ia64_cmp4_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 1)
+#define ia64_cmp4_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 1)
+#define ia64_cmp4_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 0)
+#define ia64_cmp4_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 1)
+
+/* Pseudo ops: synthesized from the primitive compares above by swapping
+ * the source registers and/or the target predicates; the qualifying
+ * predicate has to be forwarded so the predicated forms stay predicated. */
+#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_a7(code2, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 0)
+#define ia64_cmp_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 1)
+#define ia64_cmp_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 1)
+#define ia64_cmp_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 1)
+#define ia64_cmp_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 0)
+#define ia64_cmp_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 1)
+
+#define ia64_cmp4_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 0)
+#define ia64_cmp4_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 1)
+#define ia64_cmp4_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 0)
+#define ia64_cmp4_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1)
+
+#define ia64_a8(code2, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { check_greg ((r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6,
(c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) + +#define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0) +#define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0) +#define ia64_cmp_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 0) +#define ia64_cmp_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 1) +#define ia64_cmp_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 1) +#define ia64_cmp_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 1) +#define ia64_cmp_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 0) +#define ia64_cmp_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 0) +#define ia64_cmp_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 0) +#define ia64_cmp_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 1) +#define ia64_cmp_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 1) +#define ia64_cmp_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 1) + +#define ia64_cmp4_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 0) +#define ia64_cmp4_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 0) +#define ia64_cmp4_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 0) +#define ia64_cmp4_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 1) +#define ia64_cmp4_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 1) +#define ia64_cmp4_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 1) +#define ia64_cmp4_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 0) +#define ia64_cmp4_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 0) +#define ia64_cmp4_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 0) +#define ia64_cmp4_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 1) +#define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1) +#define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1) + +#define ia64_a9(code2, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) + +#define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0) +#define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 
((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 0) +#define ia64_padd4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0) +#define ia64_padd1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 1) +#define ia64_padd2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 1) +#define ia64_padd1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2) +#define ia64_padd2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 2) +#define ia64_padd1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 3) +#define ia64_padd2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 3) + +#define ia64_psub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 0) +#define ia64_psub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 0) +#define ia64_psub4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 1, 0) +#define ia64_psub1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 1) +#define ia64_psub2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 1) +#define ia64_psub1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 2) +#define ia64_psub2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 2) +#define ia64_psub1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 3) +#define ia64_psub2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 3) + +#define ia64_pavg1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2) +#define ia64_pavg2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 2) +#define ia64_pavg1_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 3) +#define ia64_pavg2_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 3) +#define ia64_pavgsub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 3, 2) +#define ia64_pavgsub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 3, 2) +#define ia64_pcmp1_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 0) +#define ia64_pcmp2_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 0) +#define ia64_pcmp4_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 0) +#define ia64_pcmp1_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 1) +#define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1) +#define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1) + +#define ia64_a10(code2, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) + +#define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, count); +#define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, count); + +#define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 
1 : (((count) == 15) ? 2 : 3)))
+
+#define ia64_i1(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { check_gregs ((r1), (r2), (r3)); check_assert (((ct2d) == 0) || ((ct2d) == 7) || ((ct2d) == 15) || ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count))
+
+#define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count))
+
+#define ia64_i2(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3)
+#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3)
+#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2)
+#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2)
+#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2)
+#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2)
+#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 2)
+#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 2)
+#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0)
+#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0)
+#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0)
+#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1)
+#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1)
+#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1)
+#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1)
+#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1)
+#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1)
+#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0)
+#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1)
+#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0)
+#define ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1)
+#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2)
+
+typedef enum {
+	IA64_MUX1_BRCST = 0x0,
+	IA64_MUX1_MIX = 0x8,
+	IA64_MUX1_SHUF = 0x9,
+	IA64_MUX1_ALT = 0xa,
+	IA64_MUX1_REV = 0xb
+} Ia64Mux1Permutation;
+
+#define ia64_i3(code2, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b,
x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) + +#define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2) + +#define ia64_i4(code2, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) + +#define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2) + +#define ia64_i5(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0) +#define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0) +#define ia64_shr_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 2, 0) +#define ia64_pshr2_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 0) +#define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0) +#define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0) + +#define ia64_i6(code2, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0) +#define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0) +#define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0) +#define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0) + +#define ia64_i7(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_pshl2_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1) +#define ia64_pshl4_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1) +#define ia64_shl_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1) + +#define ia64_i8(code2, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), 0); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1) +#define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), 
(count), 1, 0, 0, 3, 1, 1) + +#define ia64_i9(code2, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2) + +#define ia64_i10(code2, qp, r1, r2, r3, count, opcode, x2, x) do { check_gregs ((r1), (r2), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0) + +#define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), ( r3), (count), 5, 3, 0) + +#define ia64_i11(code2, qp, r1, r3, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) + +#define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0) +#define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1) + +#define ia64_i12(code2, qp, r1, r2, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) + +#define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), (r2), (pos), (len), 1, 1, 0) + +#define ia64_i13(code2, qp, r1, imm, pos, len, x2, x, y) do { ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) + +#define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1) + +#define ia64_i14(code2, qp, r1, imm, r3, pos, len, x2, x) do { check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) + +#define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1) + +#define ia64_i15(code2, qp, r1, r2, r3, pos, len) do { check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0) + +#define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len)) + +#define ia64_i16(code2, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) + +#define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0) +#define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1) +#define ia64_tbit_z_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 0) +#define ia64_tbit_nz_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 1) +#define ia64_tbit_z_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 0) +#define ia64_tbit_nz_or_pred(code, qp, p1, p2, r3, pos) 
ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 1)
+#define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0)
+#define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1)
+
+#define ia64_i17(code2, qp, p1, p2, r3, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0)
+#define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1)
+#define ia64_tnat_z_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 0)
+#define ia64_tnat_nz_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 1)
+#define ia64_tnat_z_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 0)
+#define ia64_tnat_nz_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 1)
+#define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0)
+#define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1)
+
+#define ia64_i18(code2, qp, imm, x3, x6, y) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0)
+#define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1)
+
+#define ia64_i19(code2, qp, imm, x3, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0)
+
+#define ia64_i20(code2, qp, r2, imm, x3) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_s_i_pred(code, qp, r2, disp) ia64_i20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_i21(code2, qp, b1, r2, tag13, x3, x, ih, wh) do { check_imm9 (tag13); check_gregs (0, (r2), 0); check_breg ((b1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0)
+
+typedef enum {
+	IA64_MOV_TO_BR_WH_SPTK = 0,
+	IA64_MOV_TO_BR_WH_NONE = 1,
+	IA64_MOV_TO_BR_WH_DPTK = 2
+} Ia64MovToBrWhetherHint;
+
+typedef enum {
+	IA64_BR_IH_NONE = 0,
+	IA64_BR_IH_IMP = 1
+} Ia64BranchImportanceHint;
+
+#define ia64_mov_to_breg_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh)
+#define ia64_mov_ret_to_breg_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh)
+
+#define ia64_i22(code2, qp, r1, b2, x3, x6) do { check_gregs ((r1), 0, 0); check_breg ((b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_breg_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31)
+
+#define ia64_i23(code2, qp, r2, mask, x3) do { check_greg ((r2));
ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0) + +#define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3) + +#define ia64_i24(code2, qp, imm, x3) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2) + +#define ia64_i25(code2, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0) + +#define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30) +#define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33) + +#define ia64_i26(code2, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) + +#define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a) + +#define ia64_i27(code2, qp, ar3, imm, x3, x6) do { check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a) + +#define ia64_i28(code2, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) + +#define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), (r1), (ar3), 0, 0x32) + +#define ia64_i29(code2, qp, r1, r3, x3, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) + +#define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10) +#define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x11) +#define ia64_zxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x12) +#define ia64_sxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x14) +#define ia64_sxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x15) +#define ia64_sxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x16) +#define ia64_czx1_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x18) +#define ia64_czx2_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x19) +#define ia64_czx1_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1C) +#define ia64_czx2_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1D) + +/* + * M Instruction Type + */ + +typedef enum { + IA64_LD_HINT_NONE = 0, + IA64_LD_HINT_NT1 = 1, + IA64_LD_HINT_NTA = 3 +} Ia64LoadHint; + +typedef enum { + IA64_ST_HINT_NONE = 0, + IA64_ST_HINT_NTA = 3 +} Ia64StoreHint; + +#define ia64_m1(code2, qp, r1, r3, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00) +#define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01) +#define ia64_ld4_hint_pred(code, qp, r1, r3, hint) ia64_m1 
((code), (qp), (r1), (r3), (hint), 0, 0, 0x02) +#define ia64_ld8_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x03) + +#define ia64_ld1_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x04) +#define ia64_ld2_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x05) +#define ia64_ld4_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x06) +#define ia64_ld8_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x07) + +#define ia64_ld1_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x08) +#define ia64_ld2_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x09) +#define ia64_ld4_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0A) +#define ia64_ld8_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0B) + +#define ia64_ld1_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0C) +#define ia64_ld2_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0D) +#define ia64_ld4_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0E) +#define ia64_ld8_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0F) + +#define ia64_ld1_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x10) +#define ia64_ld2_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x11) +#define ia64_ld4_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x12) +#define ia64_ld8_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x13) + +#define ia64_ld1_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x14) +#define ia64_ld2_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x15) +#define ia64_ld4_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x16) +#define ia64_ld8_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x17) + +#define ia64_ld8_fill_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x1B) + +#define ia64_ld1_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x20) +#define ia64_ld2_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x21) +#define ia64_ld4_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x22) +#define ia64_ld8_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x23) + +#define ia64_ld1_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x24) +#define ia64_ld2_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x25) +#define ia64_ld4_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x26) +#define ia64_ld8_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x27) + +#define ia64_ld1_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x28) +#define ia64_ld2_c_clr_acq_hint_pred(code, qp, 
r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x29) +#define ia64_ld4_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2A) +#define ia64_ld8_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2B) + +#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28) +#define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C) + +#define ia64_m2(code2, qp, r1, r2, r3, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00) +#define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01) +#define ia64_ld4_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x02) +#define ia64_ld8_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x03) + +#define ia64_ld1_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x04) +#define ia64_ld2_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x05) +#define ia64_ld4_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x06) +#define ia64_ld8_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x07) + +#define ia64_ld1_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x08) +#define ia64_ld2_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x09) +#define ia64_ld4_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0A) +#define ia64_ld8_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0B) + +#define ia64_ld1_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0C) +#define ia64_ld2_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0D) +#define ia64_ld4_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0E) +#define ia64_ld8_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0F) + +#define ia64_ld1_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x10) +#define ia64_ld2_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x11) +#define ia64_ld4_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x12) +#define ia64_ld8_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x13) + +#define ia64_ld1_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x14) +#define ia64_ld2_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x15) +#define ia64_ld4_acq_inc_hint_pred(code, qp, r1, r2, r3, 
hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x16) +#define ia64_ld8_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x17) + +#define ia64_ld8_fill_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x1B) + +#define ia64_ld1_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x20) +#define ia64_ld2_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x21) +#define ia64_ld4_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x22) +#define ia64_ld8_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x23) + +#define ia64_ld1_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x24) +#define ia64_ld2_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x25) +#define ia64_ld4_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x26) +#define ia64_ld8_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x27) + +#define ia64_ld1_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x28) +#define ia64_ld2_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x29) +#define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A) +#define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B) + +#define ia64_m3(code2, qp, r1, r3, imm, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) + +#define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00) +#define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01) +#define ia64_ld4_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x02) +#define ia64_ld8_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x03) + +#define ia64_ld1_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x04) +#define ia64_ld2_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x05) +#define ia64_ld4_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x06) +#define ia64_ld8_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x07) + +#define ia64_ld1_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x08) +#define ia64_ld2_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x09) +#define ia64_ld4_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), 
(hint), 1, 0, 0x0A) +#define ia64_ld8_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0B) + +#define ia64_ld1_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0C) +#define ia64_ld2_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0D) +#define ia64_ld4_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0E) +#define ia64_ld8_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0F) + +#define ia64_ld1_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x10) +#define ia64_ld2_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x11) +#define ia64_ld4_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x12) +#define ia64_ld8_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x13) + +#define ia64_ld1_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x14) +#define ia64_ld2_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x15) +#define ia64_ld4_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x16) +#define ia64_ld8_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x17) + +#define ia64_ld8_fill_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x1B) + +#define ia64_ld1_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x20) +#define ia64_ld2_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x21) +#define ia64_ld4_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x22) +#define ia64_ld8_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x23) + +#define ia64_ld1_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x24) +#define ia64_ld2_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x25) +#define ia64_ld4_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x26) +#define ia64_ld8_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x27) + +#define ia64_ld1_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x28) +#define ia64_ld2_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x29) +#define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A) +#define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B) + +#define ia64_m4(code2, qp, r3, r2, hint, m, x, x6) do { check_gregs (0, 
(r2), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30) +#define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31) +#define ia64_st4_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x32) +#define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33) + +#define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34) +#define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35) +#define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36) +#define ia64_st8_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x37) + +#define ia64_st8_spill_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x3B) + +#define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30) +#define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34) + +#define ia64_m5(code2, qp, r3, r2, imm, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) + +#define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30) +#define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31) +#define ia64_st4_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x32) +#define ia64_st8_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x33) + +#define ia64_st1_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x34) +#define ia64_st2_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x35) +#define ia64_st4_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x36) +#define ia64_st8_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x37) + +#define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B) + +#define ia64_m6(code2, qp, f1, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02) +#define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03) +#define ia64_ldf8_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x01) +#define ia64_ldfe_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x00) + +#define ia64_ldfs_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 
((code), (qp), (f1), (r3), (hint), 0, 0, 0x06) +#define ia64_ldfd_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x07) +#define ia64_ldf8_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x05) +#define ia64_ldfe_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x04) + +#define ia64_ldfs_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0A) +#define ia64_ldfd_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0B) +#define ia64_ldf8_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x09) +#define ia64_ldfe_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x08) + +#define ia64_ldfs_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0E) +#define ia64_ldfd_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0F) +#define ia64_ldf8_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0D) +#define ia64_ldfe_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0C) + +#define ia64_ldfs_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x22) +#define ia64_ldfd_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x23) +#define ia64_ldf8_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x21) +#define ia64_ldfe_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x20) + +#define ia64_ldfs_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x26) +#define ia64_ldfd_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x27) +#define ia64_ldf8_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x25) +#define ia64_ldfe_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x24) + +#define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B) + +#define ia64_m7(code2, qp, f1, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02) +#define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03) +#define ia64_ldf8_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x01) +#define ia64_ldfe_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x00) + +#define ia64_ldfs_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x06) +#define ia64_ldfd_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x07) +#define ia64_ldf8_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x05) +#define ia64_ldfe_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x04) + +#define 
ia64_ldfs_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0A) +#define ia64_ldfd_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0B) +#define ia64_ldf8_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x09) +#define ia64_ldfe_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x08) + +#define ia64_ldfs_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0E) +#define ia64_ldfd_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0F) +#define ia64_ldf8_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0D) +#define ia64_ldfe_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0C) + +#define ia64_ldfs_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x22) +#define ia64_ldfd_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x23) +#define ia64_ldf8_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x21) +#define ia64_ldfe_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x20) + +#define ia64_ldfs_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x26) +#define ia64_ldfd_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x27) +#define ia64_ldf8_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x25) +#define ia64_ldfe_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x24) + +#define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B) + +#define ia64_m8(code2, qp, f1, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) + +#define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02) +#define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03) +#define ia64_ldf8_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x01) +#define ia64_ldfe_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x00) + +#define ia64_ldfs_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x06) +#define ia64_ldfd_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x07) +#define ia64_ldf8_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x05) +#define ia64_ldfe_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x04) + +#define ia64_ldfs_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0A) 
+#define ia64_ldfd_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0B) +#define ia64_ldf8_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x09) +#define ia64_ldfe_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x08) + +#define ia64_ldfs_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0E) +#define ia64_ldfd_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0F) +#define ia64_ldf8_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0D) +#define ia64_ldfe_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0C) + +#define ia64_ldfs_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x22) +#define ia64_ldfd_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x23) +#define ia64_ldf8_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x21) +#define ia64_ldfe_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x20) + +#define ia64_ldfs_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x26) +#define ia64_ldfd_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x27) +#define ia64_ldf8_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x25) +#define ia64_ldfe_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x24) + +#define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B) + +#define ia64_m9(code2, qp, r3, f2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32) +#define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33) +#define ia64_stf8_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x31) +#define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30) +#define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B) + +#define ia64_m10(code2, qp, r3, f2, imm, hint, x6) do { check_greg ((r3)); check_freg ((f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) + +#define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32) +#define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33) +#define ia64_stf8_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x31) +#define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), 
(qp), (r3), (f2), (imm), (hint), 0x30) +#define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B) + +#define ia64_m11(code2, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02) +#define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03) +#define ia64_ldfp8_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x01) + +#define ia64_ldfps_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x06) +#define ia64_ldfpd_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x07) +#define ia64_ldfp8_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x05) + +#define ia64_ldfps_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0A) +#define ia64_ldfpd_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0B) +#define ia64_ldfp8_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x09) + +#define ia64_ldfps_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0E) +#define ia64_ldfpd_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0F) +#define ia64_ldfp8_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0D) + +#define ia64_ldfps_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x22) +#define ia64_ldfpd_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x23) +#define ia64_ldfp8_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x21) + +#define ia64_ldfps_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x26) +#define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x27) +#define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25) + +#define ia64_m12(code2, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02) +#define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03) +#define ia64_ldfp8_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x01) + +#define ia64_ldfps_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x06) +#define ia64_ldfpd_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x07) 
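+/*
+ * Note on the ldfp (paired load) forms in this group: each loads two
+ * consecutive floating-point values into f1 and f2 from [r3]. The
+ * base-update (inc) variants take no increment operand because the
+ * architecture fixes the post-increment at the access size: 8 bytes for
+ * ldfps, 16 for ldfpd and ldfp8. A sketch, assuming `code` is the
+ * emitter state used throughout this header:
+ *
+ *   ia64_ldfpd_inc_hint_pred (code, 0, 2, 3, 8, 0);
+ *
+ * emits `ldfpd f2, f3 = [r8], 16`.
+ */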
+#define ia64_ldfp8_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x05) + +#define ia64_ldfps_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0A) +#define ia64_ldfpd_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0B) +#define ia64_ldfp8_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x09) + +#define ia64_ldfps_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0E) +#define ia64_ldfpd_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0F) +#define ia64_ldfp8_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0D) + +#define ia64_ldfps_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x22) +#define ia64_ldfpd_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x23) +#define ia64_ldfp8_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x21) + +#define ia64_ldfps_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x26) +#define ia64_ldfpd_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x27) +#define ia64_ldfp8_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x25) + +typedef enum { + IA64_LFHINT_NONE = 0, + IA64_LFHINT_NT1 = 1, + IA64_LFHINT_NT2 = 2, + IA64_LFHINT_NTA = 3 +} Ia64LinePrefetchHint; + +#define ia64_m13(code2, qp, r3, hint, m, x, x6) do { check_greg ((r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_lfetch_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C) +#define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D) +#define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E) +#define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F) + +#define ia64_m14(code2, qp, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_greg ((r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C) +#define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D) +#define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E) +#define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F) + +#define ia64_m15(code2, qp, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) + +#define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C) +#define 
ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D) +#define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E) +#define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F) + +#define ia64_m16(code2, qp, r1, r3, r2, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00) +#define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01) +#define ia64_cmpxchg4_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x02) +#define ia64_cmpxchg8_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x03) +#define ia64_cmpxchg1_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x04) +#define ia64_cmpxchg2_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x05) +#define ia64_cmpxchg4_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x06) +#define ia64_cmpxchg8_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x07) +#define ia64_cmpxchg16_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x20) +#define ia64_cmpxchg16_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x24) +#define ia64_xchg1_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x08) +#define ia64_xchg2_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x09) +#define ia64_xchg4_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0A) +#define ia64_xchg8_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0B) + +#define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3))) + +#define ia64_m17(code2, qp, r1, r3, imm, hint, m, x, x6) do { int aimm = (imm) < 0 ? 
- (imm) : (imm); check_gregs ((r1), 0, (r3)); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) +#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x13) +#define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16) +#define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17) + +#define ia64_m18(code2, qp, f1, r2, m, x, x6) do { check_greg ((r2)); check_freg ((f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0) + +#define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C) +#define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D) +#define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E) +#define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F) + +#define ia64_m19(code2, qp, r1, f2, m, x, x6) do { check_greg ((r1)); check_freg ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0) + +#define ia64_getf_sig_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C) +#define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D) +#define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E) +#define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F) + +#define ia64_m20(code2, qp, r2, imm, x3) do { check_greg ((r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) + +#define ia64_chk_s_m_pred(code, qp,r2,disp) ia64_m20 ((code), (qp), (r2), (disp), 1) + +#define ia64_m21(code2, qp, f2, imm, x3) do { check_freg ((f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) + +#define ia64_chk_s_float_m_pred(code, qp,f2,disp) ia64_m21 ((code), (qp), (f2), (disp), 3) + +#define ia64_m22(code2, qp, r1, imm, x3) do { check_greg ((r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_chk_a_nc_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 4) +#define ia64_chk_a_clr_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 5) + +#define ia64_m23(code2, qp, f1, imm, x3) do { check_freg ((f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_chk_a_nc_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 6) +#define ia64_chk_a_clr_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 7) + +#define ia64_m24(code2, qp, x3, x4, x2) do { ia64_emit_ins_5 
((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) + +#define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1) +#define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2) +#define ia64_mf_pred(code, qp) ia64_m24 ((code), (qp), 0, 2, 2) +#define ia64_mf_a_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 2) +#define ia64_srlz_d_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 3) +#define ia64_srlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3) +#define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3) + +#define ia64_m25(code2, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) + +#define ia64_flushrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xC, 0) +#define ia64_loadrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xA, 0) + +#define ia64_m26(code2, qp, r1, x3, x4, x2) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) + +#define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1) + +#define ia64_m27(code2, qp, f1, x3, x4, x2) do { check_freg ((f1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) + +#define ia64_invala_e_float_pred(code, qp, f1) ia64_m27 ((code), (qp), (f1), 0, 3, 1) + +#define ia64_m28(code2, qp, r3, x3, x6, x) do { check_greg ((r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0) + +#define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0) +#define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1) + +#define ia64_m29(code2, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a) + +#define ia64_m30(code2, qp, ar3, imm, x3, x4, x2) do { check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2) + +#define ia64_m31(code2, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22) + +#define ia64_m32(code2, qp, cr3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), (cr3), (r2), 0, 0x2C) + +#define ia64_m33(code2, qp, r1, cr3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24) + +#define ia64_m34(code2, qp, r1, sor, sol, sof, x3) do { check_greg ((r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); ia64_begin_bundle ((code)); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 
13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0) + +#define ia64_m35(code2, qp, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D) +#define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29) + +#define ia64_m36(code2, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25) +#define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21) + +#define ia64_m37(code2, qp, imm, x3, x2, x4) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0) + +/* The System/Memory Management instruction encodings (M38-M47) are missing */ + +#define ia64_m48(code2, qp, imm, x3, x4, x2, y) do { check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) + +#define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0) +#define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1) + +typedef enum { + IA64_BWH_SPTK = 0, + IA64_BWH_SPNT = 1, + IA64_BWH_DPTK = 2, + IA64_BWH_DPNT = 3 +} Ia64BranchWhetherHint; + +typedef enum { + IA64_PH_FEW = 0, + IA64_PH_MANY = 1 +} Ia64SeqPrefetchHint; + +typedef enum { + IA64_DH_NONE = 0, + IA64_DH_CLR = 1 +} Ia64BranchCacheDeallocHint; + +#define ia64_b1(code2, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) + +#define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0) +#define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2) +#define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3) + +#define ia64_b2(code2, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) + +#define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5) +#define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6) +#define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7) + +#define ia64_b3(code2, qp, b1, imm, bwh, ph, dh) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); check_breg ((b1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 
12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) + +#define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) + +#define ia64_b4(code2, qp, b2, bwh, ph, dh, x6, btype) do { check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) + +#define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0) +#define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1) +#define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4) + +#define ia64_b5(code2, qp, b1, b2, bwh, ph, dh) do { check_breg ((b1)); check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) + +#define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) + +typedef enum { + IA64_IPWH_SPTK = 0, + IA64_IPWH_LOOP = 1, + IA64_IPWH_DPTK = 2, + IA64_IPWH_EXIT = 3 +} Ia64IPRelativeBranchWhetherHint; + +/* B6 and B7 are missing */ + +#define ia64_b8(code2, qp, x6) do { ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0) + +#define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02) +#define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04) +#define ia64_clrrrb_pr_pred(code, qp) ia64_b8 ((code), (qp), 0x05) +#define ia64_rfi_pred(code, qp) ia64_b8 ((code), (qp), 0x08) +#define ia64_bsw_0_pred(code, qp) ia64_b8 ((code), (qp), 0x0C) +#define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D) +#define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10) + +#define ia64_b9(code2, qp, imm, opcode, x6) do { check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) + +#define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00) +#define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00) +#define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01) + +#define ia64_x1(code2, qp, imm, x3, x6) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) + +#define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) + +#define ia64_x2(code2, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) + +#define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) + +#define ia64_x3(code2, qp, imm, bwh, ph, dh, btype) do { ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 
0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0) + +#define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0) + +#define ia64_x4(code2, qp, b1, imm, bwh, ph, dh) do { check_breg ((b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0) + +#define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) + +#define ia64_x5(code2, qp, imm, x3, x6, y) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) + +#define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0) +#define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1) + +/* + * Non predicated instruction variants + */ + + +#define ia64_add(code, r1, r2, r3) ia64_add_pred ((code), 0, r1, r2, r3) +#define ia64_add1(code, r1, r2, r3) ia64_add1_pred ((code), 0, r1, r2, r3) +#define ia64_sub(code, r1, r2, r3) ia64_sub_pred ((code), 0, r1, r2, r3) +#define ia64_sub1(code, r1, r2, r3) ia64_sub1_pred ((code), 0, r1, r2, r3) +#define ia64_addp4(code, r1, r2, r3) ia64_addp4_pred ((code), 0, r1, r2, r3) +#define ia64_and(code, r1, r2, r3) ia64_and_pred ((code), 0, r1, r2, r3) +#define ia64_andcm(code, r1, r2, r3) ia64_andcm_pred ((code), 0, r1, r2, r3) +#define ia64_or(code, r1, r2, r3) ia64_or_pred ((code), 0, r1, r2, r3) +#define ia64_xor(code, r1, r2, r3) ia64_xor_pred ((code), 0, r1, r2, r3) + + +#define ia64_shladd(code, r1, r2, r3,count) ia64_shladd_pred ((code), 0, r1, r2, r3,count) +#define ia64_shladdp4(code, r1, r2, r3,count) ia64_shladdp4_pred ((code), 0, r1, r2, r3,count) + + +#define ia64_sub_imm(code, r1,imm8,r3) ia64_sub_imm_pred ((code), 0, r1,imm8,r3) +#define ia64_and_imm(code, r1,imm8,r3) ia64_and_imm_pred ((code), 0, r1,imm8,r3) +#define ia64_andcm_imm(code, r1,imm8,r3) ia64_andcm_imm_pred ((code), 0, r1,imm8,r3) +#define ia64_or_imm(code, r1,imm8,r3) ia64_or_imm_pred ((code), 0, r1,imm8,r3) +#define ia64_xor_imm(code, r1,imm8,r3) ia64_xor_imm_pred ((code), 0, r1,imm8,r3) + + +#define ia64_adds_imm(code, r1,imm14,r3) ia64_adds_imm_pred ((code), 0, r1,imm14,r3) +#define ia64_addp4_imm(code, r1,imm14,r3) ia64_addp4_imm_pred ((code), 0, r1,imm14,r3) + + +#define ia64_addl_imm(code, r1,imm22,r3) ia64_addl_imm_pred ((code), 0, r1,imm22,r3) + + +#define ia64_cmp_lt(code, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ltu(code, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_eq(code, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_lt_unc(code, p1, p2, r2, r3) ia64_cmp_lt_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ltu_unc(code, p1, p2, r2, r3) ia64_cmp_ltu_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_eq_unc(code, p1, p2, r2, r3) ia64_cmp_eq_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_eq_and(code, p1, p2, r2, 
r3) ia64_cmp_eq_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_eq_or(code, p1, p2, r2, r3) ia64_cmp_eq_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ne_and(code, p1, p2, r2, r3) ia64_cmp_ne_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ne_or(code, p1, p2, r2, r3) ia64_cmp_ne_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3) + +#define ia64_cmp4_lt(code, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ltu(code, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_eq(code, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_lt_unc(code, p1, p2, r2, r3) ia64_cmp4_lt_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ltu_unc(code, p1, p2, r2, r3) ia64_cmp4_ltu_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_eq_unc(code, p1, p2, r2, r3) ia64_cmp4_eq_unc_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_eq_and(code, p1, p2, r2, r3) ia64_cmp4_eq_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_eq_or(code, p1, p2, r2, r3) ia64_cmp4_eq_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ne_and(code, p1, p2, r2, r3) ia64_cmp4_ne_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ne_or(code, p1, p2, r2, r3) ia64_cmp4_ne_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3) + +/* Pseudo ops */ +#define ia64_cmp_ne(code, p1, p2, r2, r3) ia64_cmp_ne_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_le(code, p1, p2, r2, r3) ia64_cmp_le_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_gt(code, p1, p2, r2, r3) ia64_cmp_gt_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ge(code, p1, p2, r2, r3) ia64_cmp_ge_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_leu(code, p1, p2, r2, r3) ia64_cmp_leu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_gtu(code, p1, p2, r2, r3) ia64_cmp_gtu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_geu(code, p1, p2, r2, r3) ia64_cmp_geu_pred ((code), 0, p1, p2, r2, r3) + +#define ia64_cmp4_ne(code, p1, p2, r2, r3) ia64_cmp4_ne_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_le(code, p1, p2, r2, r3) ia64_cmp4_le_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_gt(code, p1, p2, r2, r3) ia64_cmp4_gt_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ge(code, p1, p2, r2, r3) ia64_cmp4_ge_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_leu(code, p1, p2, r2, r3) ia64_cmp4_leu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_gtu(code, p1, p2, r2, r3) ia64_cmp4_gtu_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_geu(code, p1, p2, r2, r3) ia64_cmp4_geu_pred ((code), 0, p1, p2, r2, r3) + + +#define ia64_cmp_gt_and(code, p1, p2, r2, r3) ia64_cmp_gt_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_gt_or(code, p1, p2, r2, r3) ia64_cmp_gt_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_le_and(code, p1, p2, r2, r3) ia64_cmp_le_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_le_or(code, p1, p2, r2, r3) ia64_cmp_le_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_le_or_andcm(code, p1, p2, r2, r3) 
ia64_cmp_le_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ge_and(code, p1, p2, r2, r3) ia64_cmp_ge_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ge_or(code, p1, p2, r2, r3) ia64_cmp_ge_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_lt_and(code, p1, p2, r2, r3) ia64_cmp_lt_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_lt_or(code, p1, p2, r2, r3) ia64_cmp_lt_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3) + +#define ia64_cmp4_gt_and(code, p1, p2, r2, r3) ia64_cmp4_gt_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_gt_or(code, p1, p2, r2, r3) ia64_cmp4_gt_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_le_and(code, p1, p2, r2, r3) ia64_cmp4_le_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_le_or(code, p1, p2, r2, r3) ia64_cmp4_le_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_le_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ge_and(code, p1, p2, r2, r3) ia64_cmp4_ge_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ge_or(code, p1, p2, r2, r3) ia64_cmp4_ge_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_lt_and(code, p1, p2, r2, r3) ia64_cmp4_lt_and_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_lt_or(code, p1, p2, r2, r3) ia64_cmp4_lt_or_pred ((code), 0, p1, p2, r2, r3) +#define ia64_cmp4_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3) + + +#define ia64_cmp_lt_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_eq_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3) + +#define ia64_cmp4_lt_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_eq_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_unc_imm_pred ((code), 0, 
p1, p2, imm8, r3) +#define ia64_cmp4_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3) + + +#define ia64_padd1(code, r1,r2,r3) ia64_padd1_pred ((code), 0, r1,r2,r3) +#define ia64_padd2(code, r1,r2,r3) ia64_padd2_pred ((code), 0, r1,r2,r3) +#define ia64_padd4(code, r1,r2,r3) ia64_padd4_pred ((code), 0, r1,r2,r3) +#define ia64_padd1_sss(code, r1,r2,r3) ia64_padd1_sss_pred ((code), 0, r1,r2,r3) +#define ia64_padd2_sss(code, r1,r2,r3) ia64_padd2_sss_pred ((code), 0, r1,r2,r3) +#define ia64_padd1_uuu(code, r1,r2,r3) ia64_padd1_uuu_pred ((code), 0, r1,r2,r3) +#define ia64_padd2_uuu(code, r1,r2,r3) ia64_padd2_uuu_pred ((code), 0, r1,r2,r3) +#define ia64_padd1_uus(code, r1,r2,r3) ia64_padd1_uus_pred ((code), 0, r1,r2,r3) +#define ia64_padd2_uus(code, r1,r2,r3) ia64_padd2_uus_pred ((code), 0, r1,r2,r3) + +#define ia64_psub1(code, r1,r2,r3) ia64_psub1_pred ((code), 0, r1,r2,r3) +#define ia64_psub2(code, r1,r2,r3) ia64_psub2_pred ((code), 0, r1,r2,r3) +#define ia64_psub4(code, r1,r2,r3) ia64_psub4_pred ((code), 0, r1,r2,r3) +#define ia64_psub1_sss(code, r1,r2,r3) ia64_psub1_sss_pred ((code), 0, r1,r2,r3) +#define ia64_psub2_sss(code, r1,r2,r3) ia64_psub2_sss_pred ((code), 0, r1,r2,r3) +#define ia64_psub1_uuu(code, r1,r2,r3) ia64_psub1_uuu_pred ((code), 0, r1,r2,r3) +#define ia64_psub2_uuu(code, r1,r2,r3) ia64_psub2_uuu_pred ((code), 0, r1,r2,r3) +#define ia64_psub1_uus(code, r1,r2,r3) ia64_psub1_uus_pred ((code), 0, r1,r2,r3) +#define ia64_psub2_uus(code, r1,r2,r3) ia64_psub2_uus_pred ((code), 0, r1,r2,r3) + +#define ia64_pavg1(code, r1,r2,r3) ia64_pavg1_pred ((code), 0, r1,r2,r3) +#define ia64_pavg2(code, r1,r2,r3) ia64_pavg2_pred ((code), 0, r1,r2,r3) +#define ia64_pavg1_raz(code, r1,r2,r3) ia64_pavg1_raz_pred ((code), 0, r1,r2,r3) +#define ia64_pavg2_raz(code, r1,r2,r3) ia64_pavg2_raz_pred ((code), 0, r1,r2,r3) +#define ia64_pavgsub1(code, r1,r2,r3) ia64_pavgsub1_pred ((code), 0, r1,r2,r3) +#define ia64_pavgsub2(code, r1,r2,r3) ia64_pavgsub2_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp1_eq(code, r1,r2,r3) ia64_pcmp1_eq_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp2_eq(code, r1,r2,r3) ia64_pcmp2_eq_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp4_eq(code, r1,r2,r3) ia64_pcmp4_eq_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp1_gt(code, r1,r2,r3) ia64_pcmp1_gt_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp2_gt(code, r1,r2,r3) ia64_pcmp2_gt_pred ((code), 0, r1,r2,r3) +#define ia64_pcmp4_gt(code, r1,r2,r3) ia64_pcmp4_gt_pred ((code), 0, r1,r2,r3) + + +#define ia64_pshladd2(code, r1, r2, r3, count) ia64_pshladd2_pred ((code), 0, r1, r2, r3, count) +#define ia64_pshradd2(code, r1, r2, r3, count) ia64_pshradd2_pred ((code), 0, r1, r2, r3, count) + +#define ia64_pmpyshr2(code, r1, r2, r3, count) 
ia64_pmpyshr2_pred ((code), 0, r1, r2, r3, count) + +#define ia64_pmpyshr2_u(code, r1, r2, r3, count) ia64_pmpyshr2_u_pred ((code), 0, r1, r2, r3, count) + + +#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3) +#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3) +#define ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3) +#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3) +#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3) +#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3) +#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3) +#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3) +#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3) +#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3) +#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3) +#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3) +#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3) + +#define ia64_mux1(code, r1, r2, mbtype) ia64_mux1_pred ((code), 0, r1, r2, mbtype) + + +#define ia64_mux2(code, r1, r2, mhtype) ia64_mux2_pred ((code), 0, r1, r2, mhtype) + + +#define ia64_pshr2(code, r1, r3, r2) ia64_pshr2_pred ((code), 0, r1, r3, r2) +#define ia64_pshr4(code, r1, r3, r2) ia64_pshr4_pred ((code), 0, r1, r3, r2) +#define ia64_shr(code, r1, r3, r2) ia64_shr_pred ((code), 0, r1, r3, r2) +#define ia64_pshr2_u(code, r1, r3, r2) ia64_pshr2_u_pred ((code), 0, r1, r3, r2) +#define ia64_pshr4_u(code, r1, r3, r2) ia64_pshr4_u_pred ((code), 0, r1, r3, r2) +#define ia64_shr_u(code, r1, r3, r2) ia64_shr_u_pred ((code), 0, r1, r3, r2) + + +#define ia64_pshr2_imm(code, r1, r3, count) ia64_pshr2_imm_pred ((code), 0, r1, r3, count) +#define ia64_pshr4_imm(code, r1, r3, count) ia64_pshr4_imm_pred ((code), 0, r1, r3, count) +#define ia64_pshr2_u_imm(code, r1, r3, count) ia64_pshr2_u_imm_pred ((code), 0, r1, r3, count) +#define ia64_pshr4_u_imm(code, r1, r3, count) ia64_pshr4_u_imm_pred ((code), 0, r1, r3, count) + + +#define ia64_pshl2(code, r1, r3, r2) ia64_pshl2_pred ((code), 0, r1, r3, r2) +#define ia64_pshl4(code, r1, r3, r2) ia64_pshl4_pred ((code), 0, r1, r3, r2) +#define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2) + + +#define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count) +#define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count) + + +#define ia64_popcnt(code, r1, r3) ia64_popcnt_pred ((code), 0, r1, r3) + + +#define ia64_shrp(code, r1, r2, r3, count) 
ia64_shrp_pred ((code), 0, r1, r2, r3, count) + + +#define ia64_extr_u(code, r1, r3, pos, len) ia64_extr_u_pred ((code), 0, r1, r3, pos, len) +#define ia64_extr(code, r1, r3, pos, len) ia64_extr_pred ((code), 0, r1, r3, pos, len) + + +#define ia64_dep_z(code, r1, r2, pos, len) ia64_dep_z_pred ((code), 0, r1, r2, pos, len) + + +#define ia64_dep_z_imm(code, r1, imm, pos, len) ia64_dep_z_imm_pred ((code), 0, r1, imm, pos, len) + + +#define ia64_dep_imm(code, r1, imm, r3, pos, len) ia64_dep_imm_pred ((code), 0, r1, imm, r3, pos, len) + + +#define ia64_dep(code, r1, r2, r3, pos, len) ia64_dep_pred ((code), 0, r1, r2, r3, pos, len) + + +#define ia64_tbit_z(code, p1, p2, r3, pos) ia64_tbit_z_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_z_unc(code, p1, p2, r3, pos) ia64_tbit_z_unc_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_z_and(code, p1, p2, r3, pos) ia64_tbit_z_and_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_nz_and(code, p1, p2, r3, pos) ia64_tbit_nz_and_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_z_or(code, p1, p2, r3, pos) ia64_tbit_z_or_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_nz_or(code, p1, p2, r3, pos) ia64_tbit_nz_or_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_z_or_andcm(code, p1, p2, r3, pos) ia64_tbit_z_or_andcm_pred ((code), 0, p1, p2, r3, pos) +#define ia64_tbit_nz_or_andcm(code, p1, p2, r3, pos) ia64_tbit_nz_or_andcm_pred ((code), 0, p1, p2, r3, pos) + + +#define ia64_tnat_z(code, p1, p2, r3) ia64_tnat_z_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_z_unc(code, p1, p2, r3) ia64_tnat_z_unc_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_z_and(code, p1, p2, r3) ia64_tnat_z_and_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_nz_and(code, p1, p2, r3) ia64_tnat_nz_and_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_z_or(code, p1, p2, r3) ia64_tnat_z_or_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_nz_or(code, p1, p2, r3) ia64_tnat_nz_or_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_z_or_andcm(code, p1, p2, r3) ia64_tnat_z_or_andcm_pred ((code), 0, p1, p2, r3) +#define ia64_tnat_nz_or_andcm(code, p1, p2, r3) ia64_tnat_nz_or_andcm_pred ((code), 0, p1, p2, r3) + + +#define ia64_nop_i(code, imm) ia64_nop_i_pred ((code), 0, imm) +#define ia64_hint_i(code, imm) ia64_hint_i_pred ((code), 0, imm) + + +#define ia64_break_i(code, imm) ia64_break_i_pred ((code), 0, imm) + + +#define ia64_chk_s_i(code, r2,disp) ia64_chk_s_i_pred ((code), 0, r2,disp) + +#define ia64_mov_to_breg(code, b1, r2, disp, wh, ih) ia64_mov_to_breg_pred ((code), 0, b1, r2, disp, wh, ih) +#define ia64_mov_ret_to_breg(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_breg_pred ((code), 0, b1, r2, disp, wh, ih) + + +#define ia64_mov_from_breg(code, r1, b2) ia64_mov_from_breg_pred ((code), 0, r1, b2) + + +#define ia64_mov_to_pred(code, r2, mask) ia64_mov_to_pred_pred ((code), 0, r2, mask) + + +#define ia64_mov_to_pred_rot_imm(code, imm) ia64_mov_to_pred_rot_imm_pred ((code), 0, imm) + + +#define ia64_mov_from_ip(code, r1) ia64_mov_from_ip_pred ((code), 0, r1) +#define ia64_mov_from_pred(code, r1) ia64_mov_from_pred_pred ((code), 0, r1) + + +#define ia64_mov_to_ar_i(code, ar3, r2) ia64_mov_to_ar_i_pred ((code), 0, ar3, r2) + + +#define ia64_mov_to_ar_imm_i(code, ar3, imm) ia64_mov_to_ar_imm_i_pred ((code), 0, ar3, imm) + + +#define ia64_mov_from_ar_i(code, r1, ar3) ia64_mov_from_ar_i_pred ((code), 0, r1, ar3) + + +#define ia64_zxt1(code, r1, r3) ia64_zxt1_pred ((code), 0, r1, r3) +#define ia64_zxt2(code, r1, r3) ia64_zxt2_pred ((code), 0, r1, r3) +#define ia64_zxt4(code, r1, 
r3) ia64_zxt4_pred ((code), 0, r1, r3) +#define ia64_sxt1(code, r1, r3) ia64_sxt1_pred ((code), 0, r1, r3) +#define ia64_sxt2(code, r1, r3) ia64_sxt2_pred ((code), 0, r1, r3) +#define ia64_sxt4(code, r1, r3) ia64_sxt4_pred ((code), 0, r1, r3) +#define ia64_czx1_l(code, r1, r3) ia64_czx1_l_pred ((code), 0, r1, r3) +#define ia64_czx2_l(code, r1, r3) ia64_czx2_l_pred ((code), 0, r1, r3) +#define ia64_czx1_r(code, r1, r3) ia64_czx1_r_pred ((code), 0, r1, r3) +#define ia64_czx2_r(code, r1, r3) ia64_czx2_r_pred ((code), 0, r1, r3) + +#define ia64_ld1_hint(code, r1, r3, hint) ia64_ld1_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_hint(code, r1, r3, hint) ia64_ld2_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_hint(code, r1, r3, hint) ia64_ld4_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_hint(code, r1, r3, hint) ia64_ld8_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_s_hint(code, r1, r3, hint) ia64_ld1_s_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_s_hint(code, r1, r3, hint) ia64_ld2_s_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_s_hint(code, r1, r3, hint) ia64_ld4_s_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_s_hint(code, r1, r3, hint) ia64_ld8_s_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_a_hint(code, r1, r3, hint) ia64_ld1_a_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_a_hint(code, r1, r3, hint) ia64_ld2_a_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_a_hint(code, r1, r3, hint) ia64_ld4_a_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_a_hint(code, r1, r3, hint) ia64_ld8_a_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_sa_hint(code, r1, r3, hint) ia64_ld1_sa_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_sa_hint(code, r1, r3, hint) ia64_ld2_sa_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_sa_hint(code, r1, r3, hint) ia64_ld4_sa_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_sa_hint(code, r1, r3, hint) ia64_ld8_sa_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_bias_hint(code, r1, r3, hint) ia64_ld1_bias_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_bias_hint(code, r1, r3, hint) ia64_ld2_bias_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_bias_hint(code, r1, r3, hint) ia64_ld4_bias_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_bias_hint(code, r1, r3, hint) ia64_ld8_bias_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_acq_hint(code, r1, r3, hint) ia64_ld1_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_acq_hint(code, r1, r3, hint) ia64_ld2_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_acq_hint(code, r1, r3, hint) ia64_ld4_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_acq_hint(code, r1, r3, hint) ia64_ld8_acq_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld8_fill_hint(code, r1, r3, hint) ia64_ld8_fill_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_c_clr_hint(code, r1, r3, hint) ia64_ld1_c_clr_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_c_clr_hint(code, r1, r3, hint) ia64_ld2_c_clr_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_c_clr_hint(code, r1, r3, hint) ia64_ld4_c_clr_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_c_clr_hint(code, r1, r3, hint) ia64_ld8_c_clr_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_c_nc_hint(code, r1, r3, hint) ia64_ld1_c_nc_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_c_nc_hint(code, r1, r3, hint) ia64_ld2_c_nc_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_c_nc_hint(code, r1, r3, hint) ia64_ld4_c_nc_hint_pred 
((code), 0, r1, r3, hint) +#define ia64_ld8_c_nc_hint(code, r1, r3, hint) ia64_ld8_c_nc_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld1_c_clr_acq_hint(code, r1, r3, hint) ia64_ld1_c_clr_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld2_c_clr_acq_hint(code, r1, r3, hint) ia64_ld2_c_clr_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld4_c_clr_acq_hint(code, r1, r3, hint) ia64_ld4_c_clr_acq_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld8_c_clr_acq_hint(code, r1, r3, hint) ia64_ld8_c_clr_acq_hint_pred ((code), 0, r1, r3, hint) + +#define ia64_ld16_hint(code, r1, r3, hint) ia64_ld16_hint_pred ((code), 0, r1, r3, hint) +#define ia64_ld16_acq_hint(code, r1, r3, hint) ia64_ld16_acq_hint_pred ((code), 0, r1, r3, hint) + + +#define ia64_ld1_inc_hint(code, r1, r2, r3, hint) ia64_ld1_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_inc_hint(code, r1, r2, r3, hint) ia64_ld2_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_inc_hint(code, r1, r2, r3, hint) ia64_ld4_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_inc_hint(code, r1, r2, r3, hint) ia64_ld8_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_s_inc_hint(code, r1, r2, r3, hint) ia64_ld1_s_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_s_inc_hint(code, r1, r2, r3, hint) ia64_ld2_s_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_s_inc_hint(code, r1, r2, r3, hint) ia64_ld4_s_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_s_inc_hint(code, r1, r2, r3, hint) ia64_ld8_s_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_a_inc_hint(code, r1, r2, r3, hint) ia64_ld1_a_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_a_inc_hint(code, r1, r2, r3, hint) ia64_ld2_a_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_a_inc_hint(code, r1, r2, r3, hint) ia64_ld4_a_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_a_inc_hint(code, r1, r2, r3, hint) ia64_ld8_a_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld1_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld2_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld4_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld8_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld1_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld2_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld4_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld8_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld8_fill_inc_hint(code, r1, r2, r3, hint) ia64_ld8_fill_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_c_clr_inc_hint(code, r1, r2, r3, 
hint) ia64_ld1_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint) + +#define ia64_ld1_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld2_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld4_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) +#define ia64_ld8_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint) + + +#define ia64_ld1_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_bias_inc_imm_hint(code, r1, 
r3, imm, hint) ia64_ld4_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld8_fill_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_fill_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + +#define ia64_ld1_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld2_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld4_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +#define ia64_ld8_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) + + +#define ia64_st1_hint(code, r3, r2, hint) ia64_st1_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st2_hint(code, r3, r2, hint) ia64_st2_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st4_hint(code, r3, r2, hint) ia64_st4_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st8_hint(code, r3, r2, hint) ia64_st8_hint_pred ((code), 0, r3, r2, hint) + +#define ia64_st1_rel_hint(code, r3, r2, hint) ia64_st1_rel_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st2_rel_hint(code, r3, r2, hint) ia64_st2_rel_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st4_rel_hint(code, r3, r2, hint) ia64_st4_rel_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st8_rel_hint(code, r3, r2, hint) ia64_st8_rel_hint_pred ((code), 0, r3, r2, hint) + +#define ia64_st8_spill_hint(code, r3, r2, hint) ia64_st8_spill_hint_pred ((code), 0, r3, r2, hint) + +#define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint) +#define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint) + + +#define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st2_inc_imm_hint(code, r3, 
r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st4_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st8_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) + +#define ia64_st1_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st2_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st4_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) +#define ia64_st8_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) + +#define ia64_st8_spill_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_spill_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) + + +#define ia64_ldfs_hint(code, f1, r3, hint) ia64_ldfs_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_hint(code, f1, r3, hint) ia64_ldfd_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_hint(code, f1, r3, hint) ia64_ldf8_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_hint(code, f1, r3, hint) ia64_ldfe_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldfs_s_hint(code, f1, r3, hint) ia64_ldfs_s_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_s_hint(code, f1, r3, hint) ia64_ldfd_s_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_s_hint(code, f1, r3, hint) ia64_ldf8_s_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_s_hint(code, f1, r3, hint) ia64_ldfe_s_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldfs_a_hint(code, f1, r3, hint) ia64_ldfs_a_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_a_hint(code, f1, r3, hint) ia64_ldfd_a_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_a_hint(code, f1, r3, hint) ia64_ldf8_a_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_a_hint(code, f1, r3, hint) ia64_ldfe_a_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldfs_sa_hint(code, f1, r3, hint) ia64_ldfs_sa_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_sa_hint(code, f1, r3, hint) ia64_ldfd_sa_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_sa_hint(code, f1, r3, hint) ia64_ldf8_sa_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_sa_hint(code, f1, r3, hint) ia64_ldfe_sa_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldfs_c_clr_hint(code, f1, r3, hint) ia64_ldfs_c_clr_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_c_clr_hint(code, f1, r3, hint) ia64_ldfd_c_clr_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_c_clr_hint(code, f1, r3, hint) ia64_ldf8_c_clr_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_c_clr_hint(code, f1, r3, hint) ia64_ldfe_c_clr_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldfs_c_nc_hint(code, f1, r3, hint) ia64_ldfs_c_nc_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfd_c_nc_hint(code, f1, r3, hint) ia64_ldfd_c_nc_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldf8_c_nc_hint(code, f1, r3, hint) ia64_ldf8_c_nc_hint_pred ((code), 0, f1, r3, hint) +#define ia64_ldfe_c_nc_hint(code, f1, r3, hint) ia64_ldfe_c_nc_hint_pred ((code), 0, f1, r3, hint) + +#define ia64_ldf_fill_hint(code, f1, r3, hint) ia64_ldf_fill_hint_pred ((code), 0, f1, r3, hint) + + +#define ia64_ldfs_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_inc_hint_pred ((code), 0, f1, r3, r2, hint) 
+#define ia64_ldf8_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldfs_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_s_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_s_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldf8_s_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_s_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_s_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldfs_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_a_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_a_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldf8_a_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_a_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_a_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldfs_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldf8_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldfs_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldf8_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldfs_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfd_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldf8_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint) +#define ia64_ldfe_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint) + +#define ia64_ldf_fill_inc_hint(code, f1, r3, r2, hint) ia64_ldf_fill_inc_hint_pred ((code), 0, f1, r3, r2, hint) + + +#define ia64_ldfs_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + +#define ia64_ldfs_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + 
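All of the wrapper macros above follow one pattern: the unpredicated form forwards to its _pred counterpart with a qualifying predicate of 0. On IA-64, predicate register p0 is hardwired to 1, so qp = 0 makes the instruction unconditional; the _pred variants exist so a caller can guard the instruction on a previously computed predicate instead. A minimal sketch of the equivalence (register names from the enums further down; a hint argument of 0 is assumed here to encode "no hint"):

/* st8 [r9] = r8 -- the first two lines emit the same unconditional store */
ia64_st8_hint (code, IA64_R9, IA64_R8, 0);
ia64_st8_hint_pred (code, 0, IA64_R9, IA64_R8, 0);
/* same store, but executed only when predicate p6 is true */
ia64_st8_hint_pred (code, 6, IA64_R9, IA64_R8, 0);
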
+#define ia64_ldfs_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + +#define ia64_ldfs_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + +#define ia64_ldfs_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + +#define ia64_ldfs_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfd_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldf8_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +#define ia64_ldfe_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + +#define ia64_ldf_fill_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf_fill_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) + + +#define ia64_stfs_hint(code, r3, f2, hint) ia64_stfs_hint_pred ((code), 0, r3, f2, hint) +#define ia64_stfd_hint(code, r3, f2, hint) ia64_stfd_hint_pred ((code), 0, r3, f2, hint) +#define ia64_stf8_hint(code, r3, f2, hint) ia64_stf8_hint_pred ((code), 0, r3, f2, hint) +#define ia64_stfe_hint(code, r3, f2, hint) ia64_stfe_hint_pred ((code), 0, r3, f2, hint) +#define ia64_stf_spill_hint(code, r3, f2, hint) ia64_stf_spill_hint_pred ((code), 0, r3, f2, hint) + + +#define ia64_stfs_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfs_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint) +#define ia64_stfd_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfd_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint) +#define ia64_stf8_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf8_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint) +#define ia64_stfe_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfe_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint) +#define ia64_stf_spill_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf_spill_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint) + + +#define ia64_ldfps_hint(code, f1, f2, r3, hint) ia64_ldfps_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_hint(code, f1, f2, r3, hint) ia64_ldfpd_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_hint(code, f1, f2, r3, hint) ia64_ldfp8_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_s_hint(code, f1, f2, r3, hint) ia64_ldfps_s_hint_pred 
((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_s_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_s_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_a_hint(code, f1, f2, r3, hint) ia64_ldfps_a_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_a_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_a_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_sa_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_sa_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_sa_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_hint_pred ((code), 0, f1, f2, r3, hint) + + +#define ia64_ldfps_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_s_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_a_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_ldfps_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfpd_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint) +#define ia64_ldfp8_c_nc_inc_hint(code, f1, f2, r3, hint) 
ia64_ldfp8_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint) + +#define ia64_lfetch_hint(code, r3, hint) ia64_lfetch_hint_pred ((code), 0, r3, hint) +#define ia64_lfetch_excl_hint(code, r3, hint) ia64_lfetch_excl_hint_pred ((code), 0, r3, hint) +#define ia64_lfetch_fault_hint(code, r3, hint) ia64_lfetch_fault_hint_pred ((code), 0, r3, hint) +#define ia64_lfetch_fault_excl_hint(code, r3, hint) ia64_lfetch_fault_excl_hint_pred ((code), 0, r3, hint) + + +#define ia64_lfetch_inc_hint(code, r3, r2, hint) ia64_lfetch_inc_hint_pred ((code), 0, r3, r2, hint) +#define ia64_lfetch_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_excl_inc_hint_pred ((code), 0, r3, r2, hint) +#define ia64_lfetch_fault_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_inc_hint_pred ((code), 0, r3, r2, hint) +#define ia64_lfetch_fault_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_excl_inc_hint_pred ((code), 0, r3, r2, hint) + + +#define ia64_lfetch_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_inc_imm_hint_pred ((code), 0, r3, imm, hint) +#define ia64_lfetch_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint) +#define ia64_lfetch_fault_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_inc_imm_hint_pred ((code), 0, r3, imm, hint) +#define ia64_lfetch_fault_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint) + + +#define ia64_cmpxchg1_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_acq_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg2_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_acq_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg4_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_acq_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg8_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_acq_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg1_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_rel_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg2_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_rel_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg4_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_rel_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg8_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_rel_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg16_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_acq_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_cmpxchg16_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_rel_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_xchg1_hint(code, r1, r3, r2, hint) ia64_xchg1_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_xchg2_hint(code, r1, r3, r2, hint) ia64_xchg2_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_xchg4_hint(code, r1, r3, r2, hint) ia64_xchg4_hint_pred ((code), 0, r1, r3, r2, hint) +#define ia64_xchg8_hint(code, r1, r3, r2, hint) ia64_xchg8_hint_pred ((code), 0, r1, r3, r2, hint) + +#define ia64_fetchadd4_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd4_acq_hint_pred ((code), 0, r1, r3, inc, hint) +#define ia64_fetchadd8_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd8_acq_hint_pred ((code), 0, r1, r3, inc, hint) +#define ia64_fetchadd4_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd4_rel_hint_pred ((code), 0, r1, r3, inc, hint) +#define ia64_fetchadd8_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd8_rel_hint_pred ((code), 0, r1, r3, inc, hint) + + +#define ia64_setf_sig(code, f1, r2) ia64_setf_sig_pred ((code), 0, f1, r2) +#define ia64_setf_exp(code, f1, r2) 
ia64_setf_exp_pred ((code), 0, f1, r2) +#define ia64_setf_s(code, f1, r2) ia64_setf_s_pred ((code), 0, f1, r2) +#define ia64_setf_d(code, f1, r2) ia64_setf_d_pred ((code), 0, f1, r2) + + +#define ia64_getf_sig(code, r1, f2) ia64_getf_sig_pred ((code), 0, r1, f2) +#define ia64_getf_exp(code, r1, f2) ia64_getf_exp_pred ((code), 0, r1, f2) +#define ia64_getf_s(code, r1, f2) ia64_getf_s_pred ((code), 0, r1, f2) +#define ia64_getf_d(code, r1, f2) ia64_getf_d_pred ((code), 0, r1, f2) + + +#define ia64_chk_s_m(code, r2,disp) ia64_chk_s_m_pred ((code), 0, r2,disp) + + +#define ia64_chk_s_float_m(code, f2,disp) ia64_chk_s_float_m_pred ((code), 0, f2,disp) + + +#define ia64_chk_a_nc(code, r1,disp) ia64_chk_a_nc_pred ((code), 0, r1,disp) +#define ia64_chk_a_clr(code, r1,disp) ia64_chk_a_clr_pred ((code), 0, r1,disp) + + +#define ia64_chk_a_nc_float(code, f1,disp) ia64_chk_a_nc_float_pred ((code), 0, f1,disp) +#define ia64_chk_a_clr_float(code, f1,disp) ia64_chk_a_clr_float_pred ((code), 0, f1,disp) + + +#define ia64_invala(code) ia64_invala_pred ((code), 0) +#define ia64_fwb(code) ia64_fwb_pred ((code), 0) +#define ia64_mf(code) ia64_mf_pred ((code), 0) +#define ia64_mf_a(code) ia64_mf_a_pred ((code), 0) +#define ia64_srlz_d(code) ia64_srlz_d_pred ((code), 0) +#define ia64_stlz_i(code) ia64_stlz_i_pred ((code), 0) +#define ia64_sync_i(code) ia64_sync_i_pred ((code), 0) + + +#define ia64_flushrs(code) ia64_flushrs_pred ((code), 0) +#define ia64_loadrs(code) ia64_loadrs_pred ((code), 0) + +#define ia64_invala_e(code, r1) ia64_invala_e_pred ((code), 0, r1) + + +#define ia64_invala_e_float(code, f1) ia64_invala_e_float_pred ((code), 0, f1) + + +#define ia64_fc(code, r3) ia64_fc_pred ((code), 0, r3) +#define ia64_fc_i(code, r3) ia64_fc_i_pred ((code), 0, r3) + + +#define ia64_mov_to_ar_m(code, ar3, r2) ia64_mov_to_ar_m_pred ((code), 0, ar3, r2) + + +#define ia64_mov_to_ar_imm_m(code, ar3, imm) ia64_mov_to_ar_imm_m_pred ((code), 0, ar3, imm) + + +#define ia64_mov_from_ar_m(code, r1, ar3) ia64_mov_from_ar_m_pred ((code), 0, r1, ar3) + +#define ia64_mov_to_cr(code, cr3, r2) ia64_mov_to_cr_pred ((code), 0, cr3, r2) + + +#define ia64_mov_from_cr(code, r1, cr3) ia64_mov_from_cr_pred ((code), 0, r1, cr3) + + +#define ia64_alloc(code, r1, i, l, o, r) ia64_alloc_pred ((code), 0, r1, i, l, o, r) + + +#define ia64_mov_to_psr_l(code, r2) ia64_mov_to_psr_l_pred ((code), 0, r2) +#define ia64_mov_to_psr_um(code, r2) ia64_mov_to_psr_um_pred ((code), 0, r2) + + +#define ia64_mov_from_psr(code, r1) ia64_mov_from_psr_pred ((code), 0, r1) +#define ia64_mov_from_psr_um(code, r1) ia64_mov_from_psr_um_pred ((code), 0, r1) + + +#define ia64_break_m(code, imm) ia64_break_m_pred ((code), 0, imm) + +/* The System/Memory Management instruction encodings (M38-M47) */ + + +#define ia64_nop_m(code, imm) ia64_nop_m_pred ((code), 0, imm) +#define ia64_hint_m(code, imm) ia64_hint_m_pred ((code), 0, imm) + +#define ia64_br_cond_hint(code, disp, bwh, ph, dh) ia64_br_cond_hint_pred ((code), 0, disp, bwh, ph, dh) +#define ia64_br_wexit_hint(code, disp, bwh, ph, dh) ia64_br_wexit_hint_pred ((code), 0, disp, bwh, ph, dh) +#define ia64_br_wtop_hint(code, disp, bwh, ph, dh) ia64_br_wtop_hint_pred ((code), 0, disp, bwh, ph, dh) + + +#define ia64_br_cloop_hint(code, disp, bwh, ph, dh) ia64_br_cloop_hint_pred ((code), 0, disp, bwh, ph, dh) +#define ia64_br_cexit_hint(code, disp, bwh, ph, dh) ia64_br_cexit_hint_pred ((code), 0, disp, bwh, ph, dh) +#define ia64_br_ctop_hint(code, disp, bwh, ph, dh) ia64_br_ctop_hint_pred ((code), 0, disp, bwh, ph, dh) 
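A minimal usage sketch of how these emission macros are driven, mirroring the test program in ia64/codegen.c further down in this history; the 0 hint arguments assume 0 encodes the default completers, and the buffer size is arbitrary:

/* Emit "movl r8 = 0x1234" followed by "br.ret b0", then flush the
 * pending bundle; returns the first byte past the emitted code. */
static guint8 *
emit_ret_1234 (guint8 *buf)
{
	Ia64CodegenState code;

	ia64_codegen_init (code, buf);
	ia64_movl (code, IA64_R8, 0x1234);             /* movl r8 = 0x1234 */
	ia64_br_ret_reg_hint (code, IA64_B0, 0, 0, 0); /* br.ret b0        */
	ia64_codegen_close (code);                     /* flush bundle     */
	return code.buf;
}
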
+ + +#define ia64_br_call_hint(code, b1, disp, bwh, ph, dh) ia64_br_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh) + + +#define ia64_br_cond_reg_hint(code, b1, bwh, ph, dh) ia64_br_cond_reg_hint_pred ((code), 0, b1, bwh, ph, dh) +#define ia64_br_ia_reg_hint(code, b1, bwh, ph, dh) ia64_br_ia_reg_hint_pred ((code), 0, b1, bwh, ph, dh) +#define ia64_br_ret_reg_hint(code, b1, bwh, ph, dh) ia64_br_ret_reg_hint_pred ((code), 0, b1, bwh, ph, dh) + + +#define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh) + +#define ia64_cover(code) ia64_cover_pred ((code), 0) +#define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0) +#define ia64_clrrrb_pr(code) ia64_clrrrb_pr_pred ((code), 0) +#define ia64_rfi(code) ia64_rfi_pred ((code), 0) +#define ia64_bsw_0(code) ia64_bsw_0_pred ((code), 0) +#define ia64_bsw_1(code) ia64_bsw_1_pred ((code), 0) +#define ia64_epc(code) ia64_epc_pred ((code), 0) + + +#define ia64_break_b(code, imm) ia64_break_b_pred ((code), 0, imm) +#define ia64_nop_b(code, imm) ia64_nop_b_pred ((code), 0, imm) +#define ia64_hint_b(code, imm) ia64_hint_b_pred ((code), 0, imm) + + +#define ia64_break_x(code, imm) ia64_break_x_pred ((code), 0, imm) + + +#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, r1, imm) + + +#define ia64_brl_cond_hint(code, disp, bwh, ph, dh) ia64_brl_cond_hint_pred ((code), 0, disp, bwh, ph, dh) + + +#define ia64_brl_call_hint(code, b1, disp, bwh, ph, dh) ia64_brl_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh) + + +#define ia64_nop_x(code, imm) ia64_nop_x_pred ((code), 0, imm) +#define ia64_hint_x(code, imm) ia64_hint_x_pred ((code), 0, imm) + +#endif -- cgit v1.1 From 061e9ab4d483c98d6747caad5160bd30fbbf09ab Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 14 May 2005 19:52:56 +0000 Subject: 2005-05-14 Zoltan Varga * Makefile.am: Only compile libmonoarch if the interpreter is compiled. svn path=/trunk/mono/; revision=44526 --- ChangeLog | 2 ++ Makefile.am | 2 ++ 2 files changed, 4 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8122045..f400d6c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2005-05-14 Zoltan Varga + * Makefile.am: Only compile libmonoarch if the interpreter is compiled. + * ia64/ia64-codegen.h: Add IA64 code generation macros. * Makefile.am: Add ia64 subdir. diff --git a/Makefile.am b/Makefile.am index 57c353d..e006bec 100644 --- a/Makefile.am +++ b/Makefile.am @@ -3,11 +3,13 @@ DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +if INTERP_SUPPORTED noinst_LTLIBRARIES = libmonoarch.la libmonoarch_la_SOURCES = unknown.c libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la +endif EXTRA_DIST = ChangeLog -- cgit v1.1 From 3f053b86a49d8c41d47ca2ff771bda64ee5a5ddc Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 18 May 2005 18:55:54 +0000 Subject: 2005-05-18 Zoltan Varga * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter. svn path=/trunk/mono/; revision=44705 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index f400d6c..1a51d5f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-18 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter. + 2005-05-14 Zoltan Varga * Makefile.am: Only compile libmonoarch if the interpreter is compiled. 
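The parameter rename recorded in the ChangeLog above (and shown in the hunk that follows) fixes a classic preprocessor capture bug: the C preprocessor substitutes every occurrence of a macro parameter token in the body, including the one after the '.', so with the parameter named buf the member access code.buf was itself rewritten to use the caller's argument. The old macro therefore only compiled when the argument happened to be a variable literally named buf. A reduced illustration with hypothetical names:

#define init_bad(code, buf)           do { code.buf = buf; } while (0)
#define init_fixed(code, codegen_buf) do { code.buf = codegen_buf; } while (0)

init_bad (state, my_buf);   /* expands to: state.my_buf = my_buf;  (broken)   */
init_fixed (state, my_buf); /* expands to: state.buf = my_buf;     (intended) */
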
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 7f2955f..8126cfe 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -82,8 +82,8 @@ static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); * now. Also, we emit 1 ins + 2 nops. */ -#define ia64_codegen_init(code, buf) do { \ - code.buf = buf; \ +#define ia64_codegen_init(code, codegen_buf) do { \ + code.buf = codegen_buf; \ code.nins = 0; \ } while (0) -- cgit v1.1 From 1d94e7499dc18c3882f4aa16e977ceeaacddd466 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 18 May 2005 23:02:39 +0000 Subject: 2005-05-19 Zoltan Varga * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work. svn path=/trunk/mono/; revision=44722 --- ChangeLog | 4 + ia64/codegen.c | 43 +++++++-- ia64/ia64-codegen.h | 268 +++++++++++++++++++++++++++++++++------------------- 3 files changed, 211 insertions(+), 104 deletions(-) diff --git a/ChangeLog b/ChangeLog index 1a51d5f..5fee199 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-19 Zoltan Varga + + * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work. + 2005-05-18 Zoltan Varga * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter. diff --git a/ia64/codegen.c b/ia64/codegen.c index 3f9e60d..7d5e71a 100644 --- a/ia64/codegen.c +++ b/ia64/codegen.c @@ -35,8 +35,13 @@ mono_disassemble_code (guint8 *code, int size, char *id) fclose (ofd); +#ifdef __ia64__ +#define DIS_CMD "objdump -d" +#define AS_CMD "as" +#else #define DIS_CMD "ia64-linux-gnu-objdump -d" #define AS_CMD "ia64-linux-gnu-as" +#endif o_file = g_strdup_printf ("%s/test.o", tmp); cmd = g_strdup_printf (AS_CMD " %s -o %s", as_file, o_file); @@ -309,13 +314,13 @@ main () ia64_chk_s_i (code, 1, -1); ia64_chk_s_i (code, 1, 1); - ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); - ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); - ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); - ia64_mov_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); - ia64_mov_ret_to_breg (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); + ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); + ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); + ia64_mov_ret_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); - ia64_mov_from_breg (code, 1, 1); + ia64_mov_from_br (code, 1, 1); ia64_mov_to_pred (code, 1, 0xfe); @@ -722,6 +727,32 @@ main () ia64_codegen_close (code); + /* disassembly */ + { + guint8 *buf = code.buf; + int template; + guint64 dw1, dw2; + guint64 ins1, ins2, ins3; + + ia64_break_i (code, 0x1234); + + ia64_codegen_close (code); + + dw1 = ((guint64*)buf) [0]; + dw2 = ((guint64*)buf) [1]; + + template = ia64_bundle_template (buf); + ins1 = ia64_bundle_ins1 (buf); + ins2 = ia64_bundle_ins2 (buf); + ins3 = ia64_bundle_ins3 (buf); + + code.buf = buf; + ia64_emit_bundle_template (&code, template, ins1, ins2, ins3); + + g_assert (dw1 == ((guint64*)buf) [0]); + g_assert (dw2 == ((guint64*)buf) [1]); + } + mono_disassemble_code (buf, 40960, "code"); return 0; diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 8126cfe..746fd84 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -56,6 +56,71 @@ typedef enum { IA64_TEMPLATE_UNUS8 = 0x1F, } Ia64BundleTemplate; +typedef enum { + IA64_R0 = 0, + IA64_R1 = 1, + IA64_R2 = 2, + IA64_R3 = 3, + IA64_R4 = 4, + IA64_R5 = 5, + IA64_R6 = 6, + IA64_R7 = 7, + IA64_R8 = 8, + 
IA64_R9 = 9, + IA64_R10 = 10, + IA64_R11 = 11, + IA64_R12 = 12, + IA64_R13 = 13, + IA64_R14 = 14, + IA64_R15 = 15, + IA64_R16 = 16, + IA64_R17 = 17, + IA64_R18 = 18, + IA64_R19 = 19, + IA64_R20 = 20, + IA64_R21 = 21, + IA64_R22 = 22, + IA64_R23 = 23, + IA64_R24 = 24, + IA64_R25 = 25, + IA64_R26 = 26, + IA64_R27 = 27, + IA64_R28 = 28, + IA64_R29 = 29, + IA64_R30 = 30, + IA64_R31 = 31, + + /* Aliases */ + IA64_GP = IA64_R1, + IA64_SP = IA64_R12, + IA64_TP = IA64_R13 +} Ia64GeneralRegister; + +typedef enum { + IA64_B0 = 0, + IA64_B1 = 1, + IA64_B2 = 2, + IA64_B3 = 3, + IA64_B4 = 4, + IA64_B5 = 5, + IA64_B6 = 6, + IA64_B7 = 7 +} Ia64BranchRegister; + +typedef enum { + IA64_PFS = 64 +} Ia64ApplicationRegister; + +/* disassembly */ +#define ia64_bundle_template(code) ((*(guint64*)code) & 0x1f) +#define ia64_bundle_ins1(code) (((*(guint64*)code) >> 5) & 0x1ffffffffff) +#define ia64_bundle_ins2(code) (((*(guint64*)code) >> 46) | ((((guint64*)code)[1] & 0x3ffff) << 18)) +#define ia64_bundle_ins3(code) ((((guint64*)code)[1]) >> 23) + +#define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) +#define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f) +#define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) + #define IA64_NOP_I ((0x01 << 27)) #define IA64_NOP_M ((0x01 << 27)) @@ -112,9 +177,9 @@ static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); guint64 dw1, dw2; \ dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \ dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \ - ((guint64*)code->buf)[0] = dw1; \ - ((guint64*)code->buf)[1] = dw2; \ - code->buf += 16; \ + ((guint64*)(code)->buf)[0] = dw1; \ + ((guint64*)(code)->buf)[1] = dw2; \ + (code)->buf += 16; \ } while (0) static void @@ -214,7 +279,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11)))) -#define ia64_a1(code2, qp, r1, r2, r3, x2a, ve, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) +#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) #define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0) #define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1) @@ -226,12 +291,12 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2) #define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3) -#define ia64_a2(code2, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) +#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); 
check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) #define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count)) #define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count)) -#define ia64_a3(code2, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { check_greg ((r1)); check_greg ((r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0) +#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { check_greg ((r1)); check_greg ((r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0) #define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1) #define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0) @@ -239,16 +304,16 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2) #define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3) -#define ia64_a4(code2, qp, r1, imm14, r3, x2a, ve) do { check_greg ((r1)); check_greg ((r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0) +#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { check_greg ((r1)); check_greg ((r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0) #define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0) #define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0) -#define ia64_a5(code2, qp, r1, imm, r3) do { check_greg ((r1)); check_greg ((r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0) +#define ia64_a5(code, qp, r1, imm, r3) do { check_greg ((r1)); check_greg ((r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0) #define ia64_addl_imm_pred(code, qp,r1,imm22,r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3)) -#define ia64_a6(code2, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) +#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 
20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) #define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0) #define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0) @@ -293,7 +358,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu ((code), (p1), (p2), (r3), (r2)) #define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu ((code), (p2), (p1), (r2), (r3)) -#define ia64_a7(code2, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) +#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) #define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0) #define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0) @@ -321,7 +386,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1) #define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1) -#define ia64_a8(code2, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { check_greg ((r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) +#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { check_greg ((r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) #define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0) #define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0) @@ -349,7 +414,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1) #define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1) -#define ia64_a9(code2, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) +#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) #define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0) #define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), 
(qp), (r1), (r2), (r3), 1, 0, 1, 0, 0) @@ -384,20 +449,20 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1) #define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1) -#define ia64_a10(code2, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) +#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) #define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, count); #define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, count); #define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 1 : (((count) == 15) ? 2 : 3))) -#define ia64_i1(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { check_gregs ((r1), (r2), (r3)); check_assert (((ct2d) == 0) | ((ct2d) == 7) | ((ct2d) == 15) | ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { check_gregs ((r1), (r2), (r3)); check_assert (((ct2d) == 0) | ((ct2d) == 7) | ((ct2d) == 15) | ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count)); #define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count)); -#define ia64_i2(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3) #define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3) @@ -430,15 +495,15 @@ typedef enum { IA64_MUX1_REV = 0xb } Ia64Mux1Permutation; -#define ia64_i3(code2, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) +#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 
(mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) #define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2) -#define ia64_i4(code2, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) +#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) #define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2) -#define ia64_i5(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0) #define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0) @@ -447,54 +512,54 @@ typedef enum { #define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0) #define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0) -#define ia64_i6(code2, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0) #define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0) #define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0) #define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0) -#define ia64_i7(code2, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define 
ia64_pshl2_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1) #define ia64_pshl4_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1) #define ia64_shl_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1) -#define ia64_i8(code2, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), 0); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), 0); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1) #define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1) -#define ia64_i9(code2, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2) -#define ia64_i10(code2, qp, r1, r2, r3, count, opcode, x2, x) do { check_gregs ((r1), (r2), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0) +#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { check_gregs ((r1), (r2), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0) #define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), ( r3), (count), 5, 3, 0) -#define ia64_i11(code2, qp, r1, r3, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) +#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) #define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0) #define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1) -#define ia64_i12(code2, qp, r1, r2, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) +#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) #define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), 
(r2), (pos), (len), 1, 1, 0) -#define ia64_i13(code2, qp, r1, imm, pos, len, x2, x, y) do { ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1) -#define ia64_i14(code2, qp, r1, imm, r3, pos, len, x2, x) do { check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1) -#define ia64_i15(code2, qp, r1, r2, r3, pos, len) do { check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0) +#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0) #define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len)) -#define ia64_i16(code2, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) +#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) #define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0) #define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1) @@ -505,7 +570,7 @@ typedef enum { #define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0) #define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1) -#define ia64_i17(code2, qp, p1, p2, r3, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) +#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) #define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0) #define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1) @@ 
-516,20 +581,20 @@ typedef enum { #define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0) #define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1) -#define ia64_i18(code2, qp, imm, x3, x6, y) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_i18(code, qp, imm, x3, x6, y) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0) #define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1) -#define ia64_i19(code2, qp, imm, x3, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_i19(code, qp, imm, x3, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0) -#define ia64_i20(code2, qp, r2, imm, x3) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i20(code, qp, r2, imm, x3) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_s_i_pred(code, qp,r2,disp) ia64_i20 ((code), (qp), (r2), (disp), 1) -#define ia64_i21(code2, qp, b1, r2, tag13, x3, x, ih, wh) do { check_imm8 (tag13); check_gregs (0, (r2), 0); check_breg ((b1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0) +#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { check_imm8 (tag13); check_gregs (0, (r2), 0); check_breg ((b1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0) typedef enum { IA64_MOV_TO_BR_WH_SPTK = 0, @@ -542,39 +607,39 @@ typedef enum { IA64_BR_IH_IMP = 1 } Ia64BranchImportanceHint; -#define ia64_mov_to_breg_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh) -#define ia64_mov_ret_to_breg_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh) +#define ia64_mov_to_br_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh) +#define ia64_mov_ret_to_br_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh) -#define ia64_i22(code2, qp, r1, b2, x3, x6) do { check_gregs ((r1), 0, 0); check_breg ((b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i22(code, qp, r1, b2, x3, x6) do { check_gregs ((r1), 0, 0); check_breg ((b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0) -#define ia64_mov_from_breg_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31); +#define 
ia64_mov_from_br_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31); -#define ia64_i23(code2, qp, r2, mask, x3) do { check_greg ((r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0) +#define ia64_i23(code, qp, r2, mask, x3) do { check_greg ((r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0) #define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3) -#define ia64_i24(code2, qp, imm, x3) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i24(code, qp, imm, x3) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2) -#define ia64_i25(code2, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i25(code, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30) #define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33) -#define ia64_i26(code2, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i26(code, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a) -#define ia64_i27(code2, qp, ar3, imm, x3, x6) do { check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i27(code, qp, ar3, imm, x3, x6) do { check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a) -#define ia64_i28(code2, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i28(code, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), (r1), (ar3), 0, 0x32) -#define ia64_i29(code2, qp, r1, r3, x3, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i29(code, qp, r1, r3, x3, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10) #define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), 
(r1), (r3), 0, 0x11) @@ -602,7 +667,7 @@ typedef enum { IA64_ST_HINT_NTA = 3 } Ia64StoreHint; -#define ia64_m1(code2, qp, r1, r3, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00) #define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01) @@ -654,7 +719,7 @@ typedef enum { #define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28) #define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C) -#define ia64_m2(code2, qp, r1, r2, r3, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00) #define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01) @@ -703,7 +768,7 @@ typedef enum { #define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A) #define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B) -#define ia64_m3(code2, qp, r1, r3, imm, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00) #define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01) @@ -752,7 +817,7 @@ typedef enum { #define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A) #define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B) -#define ia64_m4(code2, qp, r3, r2, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 
37); } while (0) #define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30) #define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31) @@ -769,7 +834,7 @@ typedef enum { #define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30) #define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34) -#define ia64_m5(code2, qp, r3, r2, imm, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30) #define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31) @@ -783,7 +848,7 @@ typedef enum { #define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B) -#define ia64_m6(code2, qp, f1, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m6(code, qp, f1, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02) #define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03) @@ -817,7 +882,7 @@ typedef enum { #define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B) -#define ia64_m7(code2, qp, f1, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02) #define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03) @@ -851,7 +916,7 @@ typedef enum { #define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B) -#define ia64_m8(code2, qp, f1, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 
((imm)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02) #define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03) @@ -885,7 +950,7 @@ typedef enum { #define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B) -#define ia64_m9(code2, qp, r3, f2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32) #define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33) @@ -893,7 +958,7 @@ typedef enum { #define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30) #define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B) -#define ia64_m10(code2, qp, r3, f2, imm, hint, x6) do { check_greg ((r3)); check_freg ((f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { check_greg ((r3)); check_freg ((f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32) #define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33) @@ -901,7 +966,7 @@ typedef enum { #define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x30) #define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B) -#define ia64_m11(code2, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02) #define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03) @@ -927,7 +992,7 @@ typedef enum { #define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 
1, 0x27) #define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25) -#define ia64_m12(code2, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02) #define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03) @@ -960,28 +1025,28 @@ typedef enum { IA64_LFHINT_NTA = 3 } Ia64LinePrefetchHint; -#define ia64_m13(code2, qp, r3, hint, m, x, x6) do { check_greg ((r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m13(code, qp, r3, hint, m, x, x6) do { check_greg ((r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_lfetch_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C) #define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D) #define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E) #define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F) -#define ia64_m14(code2, qp, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_greg ((r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_greg ((r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C) #define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D) #define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E) #define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F) -#define ia64_m15(code2, qp, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m15(code, qp, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C) #define ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D) #define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) 
ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E) #define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F) -#define ia64_m16(code2, qp, r1, r3, r2, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00) #define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01) @@ -1000,46 +1065,46 @@ typedef enum { #define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3))) -#define ia64_m17(code2, qp, r1, r3, imm, hint, m, x, x6) do { int aimm = (imm) < 0 ? - (imm) : (imm); check_gregs ((r1), 0, (r3)); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm = (imm) < 0 ? - (imm) : (imm); check_gregs ((r1), 0, (r3)); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16) #define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17) -#define ia64_m18(code2, qp, f1, r2, m, x, x6) do { check_greg ((r2)); check_freg ((f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m18(code, qp, f1, r2, m, x, x6) do { check_greg ((r2)); check_freg ((f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C) #define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D) #define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E) #define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F) -#define ia64_m19(code2, qp, r1, f2, m, x, x6) do { check_greg ((r1)); check_freg ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m19(code, qp, r1, f2, m, x, x6) do { check_greg ((r1)); check_freg ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_getf_sig_pred(code, qp, 
r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C) #define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D) #define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E) #define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F) -#define ia64_m20(code2, qp, r2, imm, x3) do { check_greg ((r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) +#define ia64_m20(code, qp, r2, imm, x3) do { check_greg ((r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) #define ia64_chk_s_m_pred(code, qp,r2,disp) ia64_m20 ((code), (qp), (r2), (disp), 1) -#define ia64_m21(code2, qp, f2, imm, x3) do { check_freg ((f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) +#define ia64_m21(code, qp, f2, imm, x3) do { check_freg ((f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) #define ia64_chk_s_float_m_pred(code, qp,f2,disp) ia64_m21 ((code), (qp), (f2), (disp), 3) -#define ia64_m22(code2, qp, r1, imm, x3) do { check_greg ((r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m22(code, qp, r1, imm, x3) do { check_greg ((r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_a_nc_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 4) #define ia64_chk_a_clr_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 5) -#define ia64_m23(code2, qp, f1, imm, x3) do { check_freg ((f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m23(code, qp, f1, imm, x3) do { check_freg ((f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_a_nc_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 6) #define ia64_chk_a_clr_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 7) -#define ia64_m24(code2, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m24(code, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1) #define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2) @@ -1049,64 +1114,64 @@ typedef enum { #define ia64_stlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3) #define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3) -#define ia64_m25(code2, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m25(code, qp, x3, x4, x2) do { ia64_emit_ins_5 
((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_flushrs_pred(code, qp) ia64_m24 ((code), (qp), 0, 0xC, 0) #define ia64_loadrs_pred(code, qp) ia64_m24 ((code), (qp), 0, 0XA, 0) -#define ia64_m26(code2, qp, r1, x3, x4, x2) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m26(code, qp, r1, x3, x4, x2) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1) -#define ia64_m27(code2, qp, f1, x3, x4, x2) do { check_freg ((f1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m27(code, qp, f1, x3, x4, x2) do { check_freg ((f1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_e_float_pred(code, qp, f1) ia64_m26 ((code), (qp), (f1), 0, 3, 1) -#define ia64_m28(code2, qp, r3, x3, x6, x) do { check_greg ((r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0) +#define ia64_m28(code, qp, r3, x3, x6, x) do { check_greg ((r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0) #define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0) #define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1) -#define ia64_m29(code2, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m29(code, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a) -#define ia64_m30(code2, qp, ar3, imm, x3, x4, x2) do { check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2) -#define ia64_m31(code2, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m31(code, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22) -#define ia64_m32(code2, qp, cr3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m32(code, qp, cr3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), 
(cr3), (r2), 0, 0x2C) -#define ia64_m33(code2, qp, r1, cr3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m33(code, qp, r1, cr3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24) -#define ia64_m34(code2, qp, r1, sor, sol, sof, x3) do { check_greg ((r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); ia64_begin_bundle ((code)); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { check_greg ((r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); ia64_begin_bundle ((code)); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0) #define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0) -#define ia64_m35(code2, qp, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m35(code, qp, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D) #define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29) -#define ia64_m36(code2, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m36(code, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25) #define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21) -#define ia64_m37(code2, qp, imm, x3, x2, x4) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m37(code, qp, imm, x3, x2, x4) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0) /* The System/Memory Management instruction encodings (M38-M47) are missing */ -#define ia64_m48(code2, qp, imm, x3, x4, x2, y) do { check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) 
#define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0) #define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1) @@ -1128,29 +1193,29 @@ typedef enum { IA64_DH_CLR = 1 } Ia64BranchCacheDeallocHint; -#define ia64_b1(code2, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) +#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) #define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0) #define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2) #define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3) -#define ia64_b2(code2, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) +#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) #define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5) #define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6) #define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7) -#define ia64_b3(code2, qp, b1, imm, bwh, ph, dh) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); check_breg ((b1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); check_breg ((b1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) -#define ia64_b4(code2, qp, b2, bwh, ph, dh, x6, btype) do { check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) +#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) #define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0) #define 
ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1) #define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4) -#define ia64_b5(code2, qp, b1, b2, bwh, ph, dh) do { check_breg ((b1)); check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) +#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { check_breg ((b1)); check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) #define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) @@ -1163,7 +1228,7 @@ typedef enum { /* B6 and B7 is missing */ -#define ia64_b8(code2, qp, x6) do { ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0) +#define ia64_b8(code, qp, x6) do { ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0) #define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02) #define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04) @@ -1173,29 +1238,29 @@ typedef enum { #define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D) #define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10) -#define ia64_b9(code2, qp, imm, opcode, x6) do { check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) +#define ia64_b9(code, qp, imm, opcode, x6) do { check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) #define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00) #define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00) #define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01) -#define ia64_x1(code2, qp, imm, x3, x6) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_x1(code, qp, imm, x3, x6) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code2, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x3f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), 
IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x3f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -#define ia64_x3(code2, qp, imm, bwh, ph, dh, btype) do { ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0) +#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0) #define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0) -#define ia64_x4(code2, qp, b1, imm, bwh, ph, dh) do { check_breg ((b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0) +#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { check_breg ((b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0) #define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) -#define ia64_x5(code2, qp, imm, x3, x6, y) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_x5(code, qp, imm, x3, x6, y) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0) #define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1) @@ -1477,11 +1542,11 @@ typedef enum { #define ia64_chk_s_i(code, r2,disp) ia64_chk_s_i_pred ((code), 0, r2,disp) -#define ia64_mov_to_breg(code, b1, r2, disp, wh, ih) ia64_mov_to_breg_pred ((code), 0, b1, r2, disp, wh, ih) -#define ia64_mov_ret_to_breg(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_breg_pred ((code), 0, b1, r2, disp, wh, ih) +#define ia64_mov_to_br(code, b1, r2, disp, wh, ih) ia64_mov_to_br_pred ((code), 0, b1, r2, disp, wh, ih) +#define ia64_mov_ret_to_br(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_pred ((code), 0, b1, r2, disp, wh, ih) -#define ia64_mov_from_breg(code, r1, b2) ia64_mov_from_breg_pred ((code), 0, r1, b2) +#define 
ia64_mov_from_br(code, r1, b2) ia64_mov_from_br_pred ((code), 0, r1, b2) #define ia64_mov_to_pred(code, r2, mask) ia64_mov_to_pred_pred ((code), 0, r2, mask) @@ -2021,4 +2086,11 @@ typedef enum { #define ia64_nop_x(code, imm) ia64_nop_x_pred ((code), 0, imm) #define ia64_hint_x(code, imm) ia64_hint_x_pred ((code), 0, imm) +/* + * Pseudo-ops + */ + +#define ia64_mov_pred(code, qp, r1, r3) ia64_adds_imm_pred ((code), (qp), (r1), 0, (r3)) +#define ia64_mov(code, r1, r3) ia64_mov_pred ((code), 0, (r1), (r3)) + #endif -- cgit v1.1 From fee3f0247077513ba3254ddb410687a11c667b8c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 20 May 2005 21:55:37 +0000 Subject: 2005-05-21 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=44855 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 12 +++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5fee199..4b3fd1e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-21 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-19 Zoltan Varga * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 746fd84..39fff70 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -160,11 +160,13 @@ static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); ia64_emit_bundle (&code, TRUE); \ } while (0) +/* To ease debugging, we emit instructions immediately */ #define ia64_emit_ins(code, itype, ins) do { \ code.instructions [code.nins] = ins; \ code.itypes [code.nins] = itype; \ code.stops [code.nins] = 1; \ code.nins ++; \ + if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); \ if (code.nins == 3) \ ia64_emit_bundle (&code, FALSE); \ } while (0) @@ -521,9 +523,9 @@ typedef enum { #define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) -#define ia64_pshl2_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1) -#define ia64_pshl4_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1) -#define ia64_shl_pred(code, qp, r1, r3, r2) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1) +#define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1) +#define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1) +#define ia64_shl_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1) #define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), 0); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) @@ -1248,7 +1250,7 @@ typedef enum { #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x3f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, 
((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) @@ -2074,7 +2076,7 @@ typedef enum { #define ia64_break_x(code, imm) ia64_break_x_pred ((code), 0, imm) -#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, r1, imm) +#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, (r1), (imm)) #define ia64_brl_cond_hint(code, disp, bwh, ph, dh) ia64_brl_cond_hint_pred ((code), 0, disp, bwh, ph, dh) -- cgit v1.1 From e32454dae1a3679056fb4ac86ffc81defc3a5eb7 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 22 May 2005 01:29:00 +0000 Subject: 2005-05-22 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=44883 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4b3fd1e..c2d920f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-22 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-21 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 39fff70..b1fa342 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -119,7 +119,11 @@ typedef enum { #define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) #define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f) +#define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f) #define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) +#define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7) +#define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f) +#define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1) #define IA64_NOP_I ((0x01 << 27)) #define IA64_NOP_M ((0x01 << 27)) -- cgit v1.1 From 1d1c3f56953c0cb26c2e695b468ea1da368aaef0 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 22 May 2005 13:31:28 +0000 Subject: 2005-05-22 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=44888 --- ChangeLog | 2 ++ ia64/ia64-codegen.h | 37 ++++++++++++++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index c2d920f..9137130 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,8 @@ 2005-05-22 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. + + * ia64/ia64-codegen.h: Ongoing IA64 work. 
2005-05-21 Zoltan Varga diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index b1fa342..954ce17 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -220,6 +220,9 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) code->nins = 0; } +#define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127)) +#define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) + #if 1 #define check_assert(cond) g_assert((cond)) @@ -420,6 +423,23 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1) #define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1) +/* Pseudo ops */ +#define ia64_cmp_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) +#define ia64_cmp_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3)) +#define ia64_cmp_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3)) +#define ia64_cmp_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) +#define ia64_cmp_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3)) +#define ia64_cmp_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3)) +#define ia64_cmp_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) + +#define ia64_cmp4_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) +#define ia64_cmp4_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3)) +#define ia64_cmp4_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3)) +#define ia64_cmp4_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) +#define ia64_cmp4_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3)) +#define ia64_cmp4_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3)) +#define ia64_cmp4_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) + #define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) #define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0) @@ -1348,7 +1368,6 @@ typedef enum { #define ia64_cmp4_gtu(code, p1, p2, r2, r3) ia64_cmp4_gtu_pred ((code), 0, p1, p2, r2, r3) #define ia64_cmp4_geu(code, p1, p2, r2, r3) ia64_cmp4_geu_pred ((code), 0, p1, p2, r2, r3) - #define ia64_cmp_gt_and(code, p1, p2, r2, r3) ia64_cmp_gt_and_pred ((code), 0, p1, p2, r2, r3) #define ia64_cmp_gt_or(code, p1, p2, r2, r3) ia64_cmp_gt_or_pred ((code), 0, p1, p2, r2, r3) #define ia64_cmp_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3) @@ -1402,6 +1421,22 @@ typedef enum { #define ia64_cmp4_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_imm_pred ((code), 0, 
p1, p2, imm8, r3) #define ia64_cmp4_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3) +/* Pseudo ops */ +#define ia64_cmp_ne_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_le_imm(code, p1, p2, imm8, r3) ia64_cmp_le_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_gt_imm(code, p1, p2, imm8, r3) ia64_cmp_gt_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_ge_imm(code, p1, p2, imm8, r3) ia64_cmp_ge_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_leu_imm(code, p1, p2, imm8, r3) ia64_cmp_leu_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp_gtu_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp_geu_imm(code, p1, p2, imm8, r3) ia64_cmp_geu_imm_pred((code), 0, p1, p2, imm8, r3) + +#define ia64_cmp4_ne_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_le_imm(code, p1, p2, imm8, r3) ia64_cmp4_le_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_gt_imm(code, p1, p2, imm8, r3) ia64_cmp4_gt_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_ge_imm(code, p1, p2, imm8, r3) ia64_cmp4_ge_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_leu_imm(code, p1, p2, imm8, r3) ia64_cmp4_leu_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp4_gtu_imm_pred((code), 0, p1, p2, imm8, r3) +#define ia64_cmp4_geu_imm(code, p1, p2, imm8, r3) ia64_cmp4_geu_imm_pred((code), 0, p1, p2, imm8, r3) #define ia64_padd1(code, r1,r2,r3) ia64_padd1_pred ((code), 0, r1,r2,r3) #define ia64_padd2(code, r1,r2,r3) ia64_padd2_pred ((code), 0, r1,r2,r3) -- cgit v1.1 From f37723d307325b539fc515774d3988e0c7ff7a14 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 22 May 2005 18:25:06 +0000 Subject: 2005-05-22 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=44892 --- ChangeLog | 2 + ia64/codegen.c | 96 +++++++++++++++++++ ia64/ia64-codegen.h | 261 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 359 insertions(+) diff --git a/ChangeLog b/ChangeLog index 9137130..1de7a6d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,8 @@ * ia64/ia64-codegen.h: Ongoing IA64 work. + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-21 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
diff --git a/ia64/codegen.c b/ia64/codegen.c index 7d5e71a..5e893e3 100644 --- a/ia64/codegen.c +++ b/ia64/codegen.c @@ -725,8 +725,103 @@ main () ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL); + /* FLOATING-POINT */ + ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2); + ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2); + + ia64_xma_l_pred (code, 1, 1, 2, 3, 4); + ia64_xma_h_pred (code, 1, 1, 2, 3, 4); + ia64_xma_hu_pred (code, 1, 1, 2, 3, 4); + + ia64_fselect_pred (code, 1, 1, 2, 3, 4); + + ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); + + ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff); + ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff); + + ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0); + ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0); + + ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0); + ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0); + + ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fman_sf_pred (code, 1, 2, 3, 4, 0); + ia64_famin_sf_pred (code, 1, 2, 3, 4, 0); + ia64_famax_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0); + ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0); + + ia64_fmerge_s_pred (code, 1, 2, 3, 4); + ia64_fmerge_ns_pred (code, 1, 2, 3, 4); + ia64_fmerge_se_pred (code, 1, 2, 3, 4); + ia64_fmix_lr_pred (code, 1, 2, 3, 4); + ia64_fmix_r_pred (code, 1, 2, 3, 4); + ia64_fmix_l_pred (code, 1, 2, 3, 4); + ia64_fsxt_r_pred (code, 1, 2, 3, 4); + ia64_fsxt_l_pred (code, 1, 2, 3, 4); + ia64_fpack_pred (code, 1, 2, 3, 4); + ia64_fswap_pred (code, 1, 2, 3, 4); + ia64_fswap_nl_pred (code, 1, 2, 3, 4); + ia64_fswap_nr_pred (code, 1, 2, 3, 4); + ia64_fand_pred (code, 1, 2, 3, 4); + ia64_fandcm_pred (code, 1, 2, 3, 4); + ia64_for_pred (code, 1, 2, 3, 4); + ia64_fxor_pred (code, 1, 2, 3, 4); + ia64_fpmerge_s_pred (code, 1, 2, 3, 4); + ia64_fpmerge_ns_pred (code, 1, 2, 3, 4); + ia64_fpmerge_se_pred (code, 1, 2, 3, 4); + + ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0); + ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0); + ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0); + ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0); + ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0); + ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0); + ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0); + ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0); + + 
ia64_fcvt_xf_pred ((code), 1, 2, 3); + + ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3); + + ia64_fclrf_sf_pred ((code), 1, 3); + + ia64_fchkf_sf_pred ((code), 1, -1, 3); + + ia64_break_f_pred ((code), 1, 0x123456); + ia64_codegen_close (code); +#if 0 /* disassembly */ { guint8 *buf = code.buf; @@ -752,6 +847,7 @@ main () g_assert (dw1 == ((guint64*)buf) [0]); g_assert (dw2 == ((guint64*)buf) [1]); } +#endif mono_disassemble_code (buf, 40960, "code"); diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 954ce17..ed52ab3 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -208,6 +208,9 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) case IA64_INS_TYPE_B: ia64_emit_bundle_template (code, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [i]); break; + case IA64_INS_TYPE_F: + ia64_emit_bundle_template (code, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I); + break; case IA64_INS_TYPE_LX: ia64_emit_bundle_template (code, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [i], code->instructions [i + 1]); i ++; @@ -237,6 +240,8 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define check_freg(fr) check_assert ((guint64)(fr) < 128) +#define check_fr(fr) check_assert ((guint64)(fr) < 128) + #define check_preg(pr) check_assert ((guint64)(pr) < 64) #define check_breg(pr) check_assert ((guint64)(pr) < 8) @@ -264,6 +269,8 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define check_dh(dh) check_assert ((dh) >= 0 && (dh) <= IA64_DH_CLR) +#define check_sf(sf) check_assert ((sf) >= 0 && (sf) <= 3) + #define check_gregs(r1,r2,r3) do { check_greg ((r1)); check_greg ((r2)); check_greg ((r3)); } while (0) #define check_pregs(p1,p2) do { check_preg ((p1)); check_preg ((p2)); } while (0) @@ -1270,6 +1277,147 @@ typedef enum { #define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00) #define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01) +/* + * FLOATING POINT + */ + +#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0) + +#define ia64_fma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 0) +#define ia64_fma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 1) +#define ia64_fma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 0) +#define ia64_fpma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 1) +#define ia64_fms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 0) +#define ia64_fms_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 1) +#define ia64_fms_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 0) +#define ia64_fpms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 1) +#define ia64_fnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 0) +#define ia64_fnma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 1) +#define ia64_fnma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), 
(qp), (f1), (f3), (f4), (f2), (sf), 0xD, 0) +#define ia64_fpnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 1) + +#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0) + +#define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0) +#define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3) +#define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2) + +#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0) + +#define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0) + +#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { check_fr ((f2)); check_fr ((f3)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0) + +#define ia64_fcmp_eq_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 0) +#define ia64_fcmp_lt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 0) +#define ia64_fcmp_le_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 0) +#define ia64_fcmp_unord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 0) +#define ia64_fcmp_eq_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 1) +#define ia64_fcmp_lt_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 1) +#define ia64_fcmp_le_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 1) +#define ia64_fcmp_unord_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 1) + +/* Pseudo ops */ +#define ia64_fcmp_gt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf)) +#define ia64_fcmp_ge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf)) +#define ia64_fcmp_ne_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) +#define ia64_fcmp_nlt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) +#define ia64_fcmp_nle_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) +#define ia64_fcmp_ngt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf)) +#define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf)) +#define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) + +#define ia64_f5(code, qp, p1, p2, f2, 
fclass, opcode, ta) do { check_fr ((f2)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (fclass) & 0x7f, 20, (p2), 27, (((guint64)(fclass)) >> 7) & 0x3, 33, (opcode), 37); } while (0) + +#define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0) +#define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1) + +#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_preg ((p2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) + +#define ia64_frcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 0, 1, 0) +#define ia64_fprcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 1, 1, 0) + +#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { check_fr ((f1)); check_fr ((f3)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) + +#define ia64_frsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 0, 1, 1) +#define ia64_fprsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 1, 1, 1) + +#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) + +#define ia64_fmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x14) +#define ia64_fman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x15) +#define ia64_famin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x16) +#define ia64_famax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x17) +#define ia64_fpmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x14) +#define ia64_fpman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x15) +#define ia64_fpamin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x16) +#define ia64_fpamax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x17) +#define ia64_fpcmp_eq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x30) +#define ia64_fpcmp_lt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x31) +#define ia64_fpcmp_le_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x32) +#define ia64_fpcmp_unord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x33) +#define ia64_fpcmp_neq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x34) +#define ia64_fpcmp_nlt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x35) +#define ia64_fpcmp_nle_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x36) +#define ia64_fpcmp_ord_sf_pred(code, qp, f1, f2, f3, sf) 
ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x37) + +#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0) + +#define ia64_fmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10) +#define ia64_fmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11) +#define ia64_fmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12) +#define ia64_fmix_lr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x39) +#define ia64_fmix_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3A) +#define ia64_fmix_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3B) +#define ia64_fsxt_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3C) +#define ia64_fsxt_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3D) +#define ia64_fpack_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x28) +#define ia64_fswap_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x34) +#define ia64_fswap_nl_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x35) +#define ia64_fswap_nr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x36) +#define ia64_fand_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2C) +#define ia64_fandcm_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2D) +#define ia64_for_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2E) +#define ia64_fxor_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2F) +#define ia64_fpmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10) +#define ia64_fpmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11) +#define ia64_fpmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12) + +#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) + +#define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18) +#define ia64_fcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x19) +#define ia64_fcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1A) +#define ia64_fcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1B) +#define ia64_fpcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x18) +#define ia64_fpcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x19) +#define ia64_fpcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1A) +#define ia64_fpcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1B) + +#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { check_fr ((f1)); check_fr ((f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } 
while (0) + +#define ia64_fcvt_xf_pred(code, qp, f1, f2) ia64_f11 ((code), (qp), (f1), (f2), 0, 0, 0x1C) + +#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) + +#define ia64_fsetc_sf_pred(code, qp, amask, omask, sf) ia64_f12 ((code), (qp), (amask), (omask), (sf), 0, 0, 0x04) + +#define ia64_f13(code, qp, sf, opcode, x, x6) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) + +#define ia64_fclrf_sf_pred(code, qp, sf) ia64_f13 ((code), (qp), (sf), 0, 0, 0x05) + +#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) + +#define ia64_fchkf_sf_pred(code, qp, disp, sf) ia64_f14 ((code), (qp), (disp), (sf), 0, 0, 0x8) + +#define ia64_f15(code, qp, imm, opcode, x, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) + +#define ia64_break_f_pred(code, qp, imm) ia64_f15 ((code), (qp), (imm), 0, 0, 0x0) + +/* + * X-UNIT ENCODINGS + */ + #define ia64_x1(code, qp, imm, x3, x6) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) @@ -1291,6 +1439,11 @@ typedef enum { #define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0) #define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1) + + + + + /* * Non predicated instruction variants */ @@ -2134,4 +2287,112 @@ typedef enum { #define ia64_mov_pred(code, qp, r1, r3) ia64_adds_imm_pred ((code), (qp), (r1), 0, (r3)) #define ia64_mov(code, r1, r3) ia64_mov_pred ((code), 0, (r1), (r3)) +/* + * FLOATING POINT + */ + +#define ia64_fma_sf(code, f1, f3, f4, f2, sf) ia64_fma_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fma_s_sf(code, f1, f3, f4, f2, sf) ia64_fma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fma_d_sf(code, f1, f3, f4, f2, sf) ia64_fma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fpma_sf(code, f1, f3, f4, f2, sf) ia64_fpma_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fms_sf(code, f1, f3, f4, f2, sf) ia64_fms_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fms_s_sf(code, f1, f3, f4, f2, sf) ia64_fms_s_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fms_d_sf(code, f1, f3, f4, f2, sf) ia64_fms_d_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fpms_sf(code, f1, f3, f4, f2, sf) ia64_fpms_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fnma_sf(code, f1, f3, f4, f2, sf) ia64_fnma_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fnma_s_sf(code, f1, f3, f4, f2, sf) ia64_fnma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fnma_d_sf(code, f1, f3, f4, f2, sf) ia64_fnma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf) +#define ia64_fpnma_sf(code, f1, f3, f4, f2, sf) ia64_fpnma_sf_pred ((code), 0, f1, f3, f4, f2, sf) + +#define ia64_xma_l(code, f1, f3, f4, f2) ia64_xma_l_pred ((code), 0, f1, f3, f4, f2) +#define 
ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2) +#define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2) + +#define ia64_fselect(code, f1, f3, f4, f2) ia64_fselect_pred ((code), 0, f1, f3, f4, f2) + +#define ia64_fcmp_eq_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_lt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_le_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_unord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_eq_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_lt_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_le_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_unord_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf) + +/* Pseudo ops */ +#define ia64_fcmp_gt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_gt_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_ge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ge_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_ne_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ne_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_nlt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nlt_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_nle_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nle_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_ngt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ngt_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_nge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nge_sf_pred ((code), 0, p1, p2, f2, f3, sf) +#define ia64_fcmp_ord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ord_sf_pred ((code), 0, p1, p2, f2, f3, sf) + +#define ia64_fclass_m(code, p1, p2, f2, fclass) ia64_fclass_m_pred ((code), 0, p1, p2, f2, fclass) +#define ia64_fclass_m_unc(code, p1, p2, f2, fclass) ia64_fclass_m_unc_pred ((code), 0, p1, p2, f2, fclass) + +#define ia64_frcpa_sf(code, f1, p2, f2, f3, sf) ia64_frcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf) +#define ia64_fprcpa_sf(code, f1, p2, f2, f3, sf) ia64_fprcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf) + +#define ia64_frsqrta_sf(code, f1, p2, f3, sf) ia64_frsqrta_sf_pred ((code), 0, f1, p2, f3, sf) +#define ia64_fprsqrta_sf(code, f1, p2, f3, sf) ia64_fprsqrta_sf_pred ((code), 0, f1, p2, f3, sf) + +#define ia64_fmin_sf(code, f1, f2, f3, sf) ia64_fmin_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fman_sf(code, f1, f2, f3, sf) ia64_fman_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_famin_sf(code, f1, f2, f3, sf) ia64_famin_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_famax_sf(code, f1, f2, f3, sf) ia64_famax_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpmin_sf(code, f1, f2, f3, sf) ia64_fpmin_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpman_sf(code, f1, f2, f3, sf) ia64_fpman_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpamin_sf(code, f1, f2, f3, sf) ia64_fpamin_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpamax_sf(code, f1, f2, f3, sf) ia64_fpamax_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_eq_sf(code, f1, f2, f3, sf) ia64_fpcmp_eq_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_lt_sf(code, f1, f2, f3, sf) ia64_fpcmp_lt_sf_pred ((code), 0, f1, f2, f3, sf) +#define 
ia64_fpcmp_le_sf(code, f1, f2, f3, sf) ia64_fpcmp_le_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_unord_sf(code, f1, f2, f3, sf) ia64_fpcmp_unord_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_neq_sf(code, f1, f2, f3, sf) ia64_fpcmp_neq_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_nlt_sf(code, f1, f2, f3, sf) ia64_fpcmp_nlt_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_nle_sf(code, f1, f2, f3, sf) ia64_fpcmp_nle_sf_pred ((code), 0, f1, f2, f3, sf) +#define ia64_fpcmp_ord_sf(code, f1, f2, f3, sf) ia64_fpcmp_ord_sf_pred ((code), 0, f1, f2, f3, sf) + +#define ia64_fmerge_s(code, f1, f2, f3) ia64_fmerge_s_pred ((code), 0, f1, f2, f3) +#define ia64_fmerge_ns(code, f1, f2, f3) ia64_fmerge_ns_pred ((code), 0, f1, f2, f3) +#define ia64_fmerge_se(code, f1, f2, f3) ia64_fmerge_se_pred ((code), 0, f1, f2, f3) +#define ia64_fmix_lr(code, f1, f2, f3) ia64_fmix_lr_pred ((code), 0, f1, f2, f3) +#define ia64_fmix_r(code, f1, f2, f3) ia64_fmix_r_pred ((code), 0, f1, f2, f3) +#define ia64_fmix_l(code, f1, f2, f3) ia64_fmix_l_pred ((code), 0, f1, f2, f3) +#define ia64_fsxt_r(code, f1, f2, f3) ia64_fsxt_r_pred ((code), 0, f1, f2, f3) +#define ia64_fsxt_l(code, f1, f2, f3) ia64_fsxt_l_pred ((code), 0, f1, f2, f3) +#define ia64_fpack(code, f1, f2, f3) ia64_fpack_pred ((code), 0, f1, f2, f3) +#define ia64_fswap(code, f1, f2, f3) ia64_fswap_pred ((code), 0, f1, f2, f3) +#define ia64_fswap_nl(code, f1, f2, f3) ia64_fswap_nl_pred ((code), 0, f1, f2, f3) +#define ia64_fswap_nr(code, f1, f2, f3) ia64_fswap_nr_pred ((code), 0, f1, f2, f3) +#define ia64_fand(code, f1, f2, f3) ia64_fand_pred ((code), 0, f1, f2, f3) +#define ia64_fandcm(code, f1, f2, f3) ia64_fandcm_pred ((code), 0, f1, f2, f3) +#define ia64_for(code, f1, f2, f3) ia64_for_pred ((code), 0, f1, f2, f3) +#define ia64_fxor(code, f1, f2, f3) ia64_fxor_pred ((code), 0, f1, f2, f3) +#define ia64_fpmerge_s(code, f1, f2, f3) ia64_fpmerge_s_pred ((code), 0, f1, f2, f3) +#define ia64_fpmerge_ns(code, f1, f2, f3) ia64_fpmerge_ns_pred ((code), 0, f1, f2, f3) +#define ia64_fpmerge_se(code, f1, f2, f3) ia64_fpmerge_se_pred ((code), 0, f1, f2, f3) + +#define ia64_fcvt_fx_sf(code, f1, f2, sf) ia64_fcvt_fx_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fcvt_fxu_sf(code, f1, f2, sf) ia64_fcvt_fxu_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fpcvt_fx_sf(code, f1, f2, sf) ia64_fpcvt_fx_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fpcvt_fxu_sf(code, f1, f2, sf) ia64_fpcvt_fxu_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fpcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf) +#define ia64_fpcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf) + +#define ia64_fcvt_xf(code, f1, f2) ia64_fcvt_xf_pred ((code), 0, f1, f2) + +#define ia64_fsetc_sf(code, amask, omask, sf) ia64_fsetc_sf_pred ((code), 0, amask, omask, sf) + +#define ia64_fclrf_sf(code, sf) ia64_fclrf_sf_pred ((code), 0, sf) + +#define ia64_fchkf_sf(code, disp, sf) ia64_fchkf_sf_pred ((code), 0, disp, sf) + +#define ia64_break_f(code, imm) ia64_break_f_pred ((code), 0, imm) + + #endif -- cgit v1.1 From 20c2fc7ba73ffaf5506ab9bf487c3f519de5067f Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 26 May 2005 17:16:50 +0000 Subject: 2005-05-26 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
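This adds fnorm and fmov pseudo ops. Both lean on the architectural constants
f0 = +0.0 and f1 = +1.0: fnorm f1 = f3 is emitted as fma f1 = f3, f1, f0
(i.e. f3 * 1.0 + 0.0), and fmov f1 = f3 as fmerge.s f1 = f3, f3. A minimal
usage sketch (register numbers are arbitrary):

    ia64_fnorm_d_sf (code, 8, 9, 0);   /* f8 = fnorm.d f9, status field sf0 */
    ia64_fmov (code, 10, 8);           /* f10 = f8 */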
svn path=/trunk/mono/; revision=45064 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/ChangeLog b/ChangeLog index 1de7a6d..e7f664b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-26 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-22 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index ed52ab3..a2559ee 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1296,6 +1296,10 @@ typedef enum { #define ia64_fnma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 0) #define ia64_fpnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 1) +/* Pseudo ops */ +#define ia64_fnorm_s_sf_pred(code, qp, f1, f3, sf) ia64_fma_s_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf)) +#define ia64_fnorm_d_sf_pred(code, qp, f1, f3, sf) ia64_fma_d_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf)) + #define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0) #define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0) @@ -1383,6 +1387,9 @@ typedef enum { #define ia64_fpmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11) #define ia64_fpmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12) +/* Pseudo ops */ +#define ia64_fmov_pred(code, qp, f1, f3) ia64_fmerge_s_pred ((code), (qp), (f1), (f3), (f3)) + #define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) #define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18) @@ -2304,6 +2311,10 @@ typedef enum { #define ia64_fnma_d_sf(code, f1, f3, f4, f2, sf) ia64_fnma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf) #define ia64_fpnma_sf(code, f1, f3, f4, f2, sf) ia64_fpnma_sf_pred ((code), 0, f1, f3, f4, f2, sf) +/* Pseudo ops */ +#define ia64_fnorm_s_sf(code, f1, f3, sf) ia64_fnorm_s_sf_pred ((code), 0, (f1), (f3), (sf)) +#define ia64_fnorm_d_sf(code, f1, f3, sf) ia64_fnorm_d_sf_pred ((code), 0, (f1), (f3), (sf)) + #define ia64_xma_l(code, f1, f3, f4, f2) ia64_xma_l_pred ((code), 0, f1, f3, f4, f2) #define ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2) #define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2) @@ -2375,6 +2386,9 @@ typedef enum { #define ia64_fpmerge_ns(code, f1, f2, f3) ia64_fpmerge_ns_pred ((code), 0, f1, f2, f3) #define ia64_fpmerge_se(code, f1, f2, f3) ia64_fpmerge_se_pred ((code), 0, f1, f2, f3) +/* Pseudo ops */ +#define ia64_fmov(code, f1, f3) ia64_fmov_pred ((code), 0, (f1), (f3)) + #define ia64_fcvt_fx_sf(code, f1, f2, sf) ia64_fcvt_fx_sf_pred ((code), 0, f1, f2, sf) #define ia64_fcvt_fxu_sf(code, f1, f2, sf) ia64_fcvt_fxu_sf_pred ((code), 0, f1, f2, sf) #define ia64_fcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf) -- cgit v1.1 From a781c3a65727b60386604adc6023f3f5a53b3e3e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 27 May 2005 21:41:59 +0000 Subject: 2005-05-28 
Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=45127 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 13 ++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index e7f664b..c1c6a51 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-28 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-26 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index a2559ee..e996d99 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -120,6 +120,7 @@ typedef enum { #define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) #define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f) #define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f) +#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x3) #define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) #define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7) #define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f) @@ -1252,6 +1253,8 @@ typedef enum { #define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) +#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred ((code), (qp), (b1), (b2), 0, 0, 0) + typedef enum { IA64_IPWH_SPTK = 0, IA64_IPWH_LOOP = 1, @@ -1942,6 +1945,11 @@ typedef enum { #define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint) #define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint) +/* Pseudo ops */ +#define ia64_ld1(code, r1, r3) ia64_ld1_hint ((code), (r1), (r3), 0) +#define ia64_ld2(code, r1, r3) ia64_ld2_hint ((code), (r1), (r3), 0) +#define ia64_ld4(code, r1, r3) ia64_ld4_hint ((code), (r1), (r3), 0) +#define ia64_ld8(code, r1, r3) ia64_ld8_hint ((code), (r1), (r3), 0) #define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) #define ia64_st2_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) @@ -2247,17 +2255,16 @@ typedef enum { #define ia64_br_cexit_hint(code, disp, bwh, ph, dh) ia64_br_cexit_hint_pred ((code), 0, disp, bwh, ph, dh) #define ia64_br_ctop_hint(code, disp, bwh, ph, dh) ia64_br_ctop_hint_pred ((code), 0, disp, bwh, ph, dh) - #define ia64_br_call_hint(code, b1, disp, bwh, ph, dh) ia64_br_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh) - #define ia64_br_cond_reg_hint(code, b1, bwh, ph, dh) ia64_br_cond_reg_hint_pred ((code), 0, b1, bwh, ph, dh) #define ia64_br_ia_reg_hint(code, b1, bwh, ph, dh) ia64_br_ia_reg_hint_pred ((code), 0, b1, bwh, ph, dh) #define ia64_br_ret_reg_hint(code, b1, bwh, ph, dh) ia64_br_ret_reg_hint_pred ((code), 0, b1, bwh, ph, dh) - #define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh) +#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_hint ((code), (b1), (b2), 0, 0, 0) + #define ia64_cover(code) ia64_cover_pred ((code), 0) #define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0) #define ia64_clrrrb_pr(code) ia64_clrrrb_pr_pred ((code), 0) -- cgit v1.1 From e360150e81b841b0644b5adc604f22f4b71e3987 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 28 May 2005 17:08:04 +0000 Subject: 2005-05-28 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
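This is a warnings cleanup: ia64_emit_bundle is marked G_GNUC_UNUSED so the
header can be included by files that never call it, the unused locals in it
go away, and ia64_emit_bundle_template now writes through a guint64 temporary
(cast via gpointer, which keeps the compiler quiet about the
alignment-increasing cast). The two stores lay out one 128-bit bundle; spelled
out, with i1/i2/i3 standing for the three 41-bit slots and template for the
5-bit bundle template (hypothetical guint64 values, exactly the macro's math):

    guint64 dw1 = (template & 0x1f) | (i1 << 5) | ((i2 & 0x3ffff) << 46);
    guint64 dw2 = (i2 >> 18) | (i3 << 23);

so slot 1 is split across the two double-words: its low 18 bits end dw1 and
its high 23 bits start dw2.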
svn path=/trunk/mono/; revision=45145 --- ChangeLog | 2 ++ ia64/ia64-codegen.h | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index c1c6a51..b712ed8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,8 @@ 2005-05-28 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. + + * ia64/ia64-codegen.h: Ongoing IA64 work. 2005-05-26 Zoltan Varga diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index e996d99..4ae053f 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -143,7 +143,7 @@ typedef struct { int nins; } Ia64CodegenState; -static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); +G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); /* * FIXME: @@ -181,19 +181,19 @@ static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); #endif #define ia64_emit_bundle_template(code, template, i1, i2, i3) do { \ + guint64 *buf64 = (guint64*)(gpointer)(code)->buf; \ guint64 dw1, dw2; \ dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \ dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \ - ((guint64*)(code)->buf)[0] = dw1; \ - ((guint64*)(code)->buf)[1] = dw2; \ + buf64[0] = dw1; \ + buf64[1] = dw2; \ (code)->buf += 16; \ } while (0) -static void +G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) { - int i, template; - guint64 i1, i2, i3; + int i; for (i = 0; i < code->nins; ++i) { switch (code->itypes [i]) { -- cgit v1.1 From 7b483f1f48c7abc9d0c17a1fb34b30ddaa7058bb Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 28 May 2005 18:02:41 +0000 Subject: 2005-05-28 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=45147 --- ChangeLog | 2 ++ ia64/ia64-codegen.h | 1 + 2 files changed, 3 insertions(+) diff --git a/ChangeLog b/ChangeLog index b712ed8..ba6ac1a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,8 @@ * ia64/ia64-codegen.h: Ongoing IA64 work. + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-26 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 4ae053f..74572de 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1690,6 +1690,7 @@ typedef enum { #define ia64_pshl4(code, r1, r3, r2) ia64_pshl4_pred ((code), 0, r1, r3, r2) #define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2) +#define ia64_shl_imm(code, r1, r3, count) ia64_dep_z ((code), (r1), (r3), count, 64 - count) #define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count) #define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count) -- cgit v1.1 From 4be6ea9e269927e9fbf06b0b73f53fef311f569f Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 29 May 2005 11:16:27 +0000 Subject: 2005-05-29 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=45157 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index ba6ac1a..0b27491 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-29 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-28 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
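The accessors below extract individual fields from a 41-bit instruction slot:
r2 and r3 join the existing r1, b1/b2 cover the branch-register fields (b1 is
widened from 2 to 3 bits), and x/y pick up single-bit opcode extensions. A
decode sketch (buf is assumed to point at a bundle):

    guint64 ins = ia64_bundle_ins1 (buf);
    int r1 = ia64_ins_r1 (ins);   /* bits 6-12  */
    int r2 = ia64_ins_r2 (ins);   /* bits 13-19 */
    int r3 = ia64_ins_r3 (ins);   /* bits 20-26 */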
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 74572de..864f539 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -120,10 +120,16 @@ typedef enum { #define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) #define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f) #define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f) -#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x3) +#define ia64_ins_r2(ins) ((((guint64)(ins)) >> 13) & 0x7f) +#define ia64_ins_r3(ins) ((((guint64)(ins)) >> 20) & 0x7f) + +#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x7) +#define ia64_ins_b2(ins) ((((guint64)(ins)) >> 13) & 0x7) #define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) +#define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1) #define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7) #define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f) +#define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1) #define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1) #define IA64_NOP_I ((0x01 << 27)) -- cgit v1.1 From d6844049f8659741b3afe9fa66136738107d28ac Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 29 May 2005 14:24:56 +0000 Subject: 2005-05-29 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. svn path=/trunk/mono/; revision=45159 --- ChangeLog | 2 + ia64/ia64-codegen.h | 571 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 557 insertions(+), 16 deletions(-) diff --git a/ChangeLog b/ChangeLog index 0b27491..d205010 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,8 @@ 2005-05-29 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. + + * ia64/ia64-codegen.h: Ongoing IA64 work. 2005-05-28 Zoltan Varga diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 864f539..205301f 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -112,10 +112,10 @@ typedef enum { } Ia64ApplicationRegister; /* disassembly */ -#define ia64_bundle_template(code) ((*(guint64*)code) & 0x1f) -#define ia64_bundle_ins1(code) (((*(guint64*)code) >> 5) & 0x1ffffffffff) -#define ia64_bundle_ins2(code) (((*(guint64*)code) >> 46) | ((((guint64*)code)[1] & 0x3ffff) << 18)) -#define ia64_bundle_ins3(code) ((((guint64*)code)[1]) >> 23) +#define ia64_bundle_template(code) ((*(guint64*)(gpointer)code) & 0x1f) +#define ia64_bundle_ins1(code) (((*(guint64*)(gpointer)code) >> 5) & 0x1ffffffffff) +#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x3ffff) << 18)) +#define ia64_bundle_ins3(code) ((((guint64*)(gpointer)code)[1]) >> 23) #define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) #define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f) @@ -647,8 +647,15 @@ typedef enum { IA64_BR_IH_IMP = 1 } Ia64BranchImportanceHint; -#define ia64_mov_to_br_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh) -#define ia64_mov_ret_to_br_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh) +#define ia64_mov_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh) +#define ia64_mov_ret_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh) + +/* Pseudo ops */ + +#define ia64_mov_to_br_pred(code, qp, b1, r2) ia64_mov_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0) +#define ia64_mov_ret_to_br_pred(code, qp, b1, r2) ia64_mov_ret_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0) + +/* End of pseudo ops */ #define ia64_i22(code, qp, r1, b2, x3, x6) do { 
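This round is mostly pseudo ops, and the pattern is uniform: every *_hint_pred
form gains a *_pred wrapper that fixes the hint to 0, plus an unpredicated
wrapper that also fixes qp to 0, so callers only spell out what they need
(mov_to_br/mov_ret_to_br likewise get hint-less forms). All three of these
emit the same ld8 (a sketch; register numbers are arbitrary):

    ia64_ld8_hint_pred (code, 0, 2, 9, 0);   /* everything explicit   */
    ia64_ld8_pred (code, 0, 2, 9);           /* hint defaulted        */
    ia64_ld8 (code, 2, 9);                   /* qp and hint defaulted */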
check_gregs ((r1), 0, 0); check_breg ((b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0) @@ -857,6 +864,154 @@ typedef enum { #define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A) #define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B) +/* Pseudo ops */ + +#define ia64_ld1_pred(code, qp, r1, r3) ia64_ld1_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_pred(code, qp, r1, r3) ia64_ld2_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_pred(code, qp, r1, r3) ia64_ld4_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_pred(code, qp, r1, r3) ia64_ld8_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_s_pred(code, qp, r1, r3) ia64_ld1_s_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_s_pred(code, qp, r1, r3) ia64_ld2_s_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_s_pred(code, qp, r1, r3) ia64_ld4_s_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_s_pred(code, qp, r1, r3) ia64_ld8_s_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_a_pred(code, qp, r1, r3) ia64_ld1_a_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_a_pred(code, qp, r1, r3) ia64_ld2_a_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_a_pred(code, qp, r1, r3) ia64_ld4_a_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_a_pred(code, qp, r1, r3) ia64_ld8_a_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_sa_pred(code, qp, r1, r3) ia64_ld1_sa_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_sa_pred(code, qp, r1, r3) ia64_ld2_sa_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_sa_pred(code, qp, r1, r3) ia64_ld4_sa_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_sa_pred(code, qp, r1, r3) ia64_ld8_sa_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_bias_pred(code, qp, r1, r3) ia64_ld1_bias_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_bias_pred(code, qp, r1, r3) ia64_ld2_bias_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_bias_pred(code, qp, r1, r3) ia64_ld4_bias_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_bias_pred(code, qp, r1, r3) ia64_ld8_bias_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_acq_pred(code, qp, r1, r3) ia64_ld1_acq_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_acq_pred(code, qp, r1, r3) ia64_ld2_acq_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_acq_pred(code, qp, r1, r3) ia64_ld4_acq_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_acq_pred(code, qp, r1, r3) ia64_ld8_acq_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld8_fill_pred(code, qp, r1, r3) ia64_ld8_fill_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_c_clr_pred(code, qp, r1, r3) ia64_ld1_c_clr_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_c_clr_pred(code, qp, r1, r3) ia64_ld2_c_clr_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_c_clr_pred(code, qp, r1, r3) ia64_ld4_c_clr_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_c_clr_pred(code, qp, r1, r3) ia64_ld8_c_clr_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_c_nc_pred(code, qp, r1, r3) ia64_ld1_c_nc_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld2_c_nc_pred(code, qp, r1, r3) ia64_ld2_c_nc_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_c_nc_pred(code, qp, r1, r3) ia64_ld4_c_nc_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_c_nc_pred(code, qp, r1, r3) ia64_ld8_c_nc_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_c_clr_acq_pred(code, qp, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, qp, r1, r3, 0) +#define 
ia64_ld2_c_clr_acq_pred(code, qp, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld4_c_clr_acq_pred(code, qp, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld8_c_clr_acq_pred(code, qp, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld16_pred(code, qp, r1, r3) ia64_ld16_hint_pred (code, qp, r1, r3, 0) +#define ia64_ld16_acq_pred(code, qp, r1, r3) ia64_ld16_acq_hint_pred (code, qp, r1, r3, 0) + +#define ia64_ld1_inc_pred(code, qp, r1, r2, r3) ia64_ld1_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_inc_pred(code, qp, r1, r2, r3) ia64_ld2_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_inc_pred(code, qp, r1, r2, r3) ia64_ld4_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_inc_pred(code, qp, r1, r2, r3) ia64_ld8_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_s_inc_pred(code, qp, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_s_inc_pred(code, qp, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_s_inc_pred(code, qp, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_s_inc_pred(code, qp, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_a_inc_pred(code, qp, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_a_inc_pred(code, qp, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_a_inc_pred(code, qp, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_a_inc_pred(code, qp, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld8_fill_inc_pred(code, qp, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define 
ia64_ld1_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld2_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld4_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) +#define ia64_ld8_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0) + +#define ia64_ld1_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_acq_inc_imm_pred(code, 
qp, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld8_fill_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +#define ia64_ld1_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld2_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld4_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) +#define ia64_ld8_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0) + +/* End of pseudo ops */ + #define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30) @@ -990,6 +1145,106 @@ typedef enum { #define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B) +/* Pseudo ops */ + +#define ia64_ldfs_pred(code, qp, f1, r3) ia64_ldfs_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_pred(code, qp, f1, r3) ia64_ldfd_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_pred(code, qp, f1, r3) ia64_ldf8_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_pred(code, qp, f1, r3) ia64_ldfe_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_s_pred(code, qp, f1, r3) ia64_ldfs_s_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_s_pred(code, qp, f1, r3) ia64_ldfd_s_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_s_pred(code, qp, f1, r3) ia64_ldf8_s_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_s_pred(code, qp, f1, r3) ia64_ldfe_s_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_a_pred(code, qp, f1, r3) ia64_ldfs_a_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_a_pred(code, qp, f1, r3) ia64_ldfd_a_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_a_pred(code, qp, f1, r3) ia64_ldf8_a_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_a_pred(code, qp, f1, r3) ia64_ldfe_a_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_sa_pred(code, qp, f1, r3) ia64_ldfs_sa_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_sa_pred(code, qp, f1, r3) ia64_ldfd_sa_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_sa_pred(code, qp, f1, r3) ia64_ldf8_sa_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_sa_pred(code, qp, 
f1, r3) ia64_ldfe_sa_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_c_clr_pred(code, qp, f1, r3) ia64_ldfs_c_clr_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_c_clr_pred(code, qp, f1, r3) ia64_ldfd_c_clr_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_c_clr_pred(code, qp, f1, r3) ia64_ldf8_c_clr_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_c_clr_pred(code, qp, f1, r3) ia64_ldfe_c_clr_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_c_nc_pred(code, qp, f1, r3) ia64_ldfs_c_nc_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfd_c_nc_pred(code, qp, f1, r3) ia64_ldfd_c_nc_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldf8_c_nc_pred(code, qp, f1, r3) ia64_ldf8_c_nc_hint_pred (code, qp, f1, r3, 0) +#define ia64_ldfe_c_nc_pred(code, qp, f1, r3) ia64_ldfe_c_nc_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldf_fill_pred(code, qp, f1, r3) ia64_ldf_fill_hint_pred (code, qp, f1, r3, 0) + +#define ia64_ldfs_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_s_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_s_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_s_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_s_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_s_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_a_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_a_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_a_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_a_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_a_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_sa_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_sa_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_sa_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_sa_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfd_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldf8_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0) +#define ia64_ldfe_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0) + +#define ia64_ldf_fill_inc_pred(code, qp, f1, r3, r2) ia64_ldf_fill_inc_hint_pred 
(code, qp, f1, r3, r2, 0) + +#define ia64_ldfs_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldfs_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldfs_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldfs_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldfs_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldfs_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfd_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldf8_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) +#define ia64_ldfe_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +#define ia64_ldf_fill_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf_fill_inc_imm_hint_pred (code, qp, f1, r3, imm, 0) + +/* End of pseudo ops */ + #define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32) @@ -1259,7 +1514,25 @@ typedef enum { #define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) -#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred ((code), (qp), 
(b1), (b2), 0, 0, 0) +/* Pseudo ops */ + +#define ia64_br_cond_pred(code, qp, disp) ia64_br_cond_hint_pred (code, qp, disp, 0, 0, 0) +#define ia64_br_wexit_pred(code, qp, disp) ia64_br_wexit_hint_pred (code, qp, disp, 0, 0, 0) +#define ia64_br_wtop_pred(code, qp, disp) ia64_br_wtop_hint_pred (code, qp, disp, 0, 0, 0) + +#define ia64_br_cloop_pred(code, qp, disp) ia64_br_cloop_hint_pred (code, qp, disp, 0, 0, 0) +#define ia64_br_cexit_pred(code, qp, disp) ia64_br_cexit_hint_pred (code, qp, disp, 0, 0, 0) +#define ia64_br_ctop_pred(code, qp, disp) ia64_br_ctop_hint_pred (code, qp, disp, 0, 0, 0) + +#define ia64_br_call_pred(code, qp, b1, disp) ia64_br_call_hint_pred (code, qp, b1, disp, 0, 0, 0) + +#define ia64_br_cond_reg_pred(code, qp, b1) ia64_br_cond_reg_hint_pred (code, qp, b1, 0, 0, 0) +#define ia64_br_ia_reg_pred(code, qp, b1) ia64_br_ia_reg_hint_pred (code, qp, b1, 0, 0, 0) +#define ia64_br_ret_reg_pred(code, qp, b1) ia64_br_ret_reg_hint_pred (code, qp, b1, 0, 0, 0) + +#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred (code, qp, b1, b2, 0, 0, 0) + +/* End of pseudo ops */ typedef enum { IA64_IPWH_SPTK = 0, @@ -1697,6 +1970,8 @@ typedef enum { #define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2) #define ia64_shl_imm(code, r1, r3, count) ia64_dep_z ((code), (r1), (r3), count, 64 - count) +#define ia64_shr_imm(code, r1, r3, count) ia64_extr ((code), (r1), (r3), count, 64 - count) +#define ia64_shr_u_imm(code, r1, r3, count) ia64_extr_u ((code), (r1), (r3), count, 64 - count) #define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count) #define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count) @@ -1753,9 +2028,15 @@ typedef enum { #define ia64_chk_s_i(code, r2,disp) ia64_chk_s_i_pred ((code), 0, r2,disp) -#define ia64_mov_to_br(code, b1, r2, disp, wh, ih) ia64_mov_to_br_pred ((code), 0, b1, r2, disp, wh, ih) -#define ia64_mov_ret_to_br(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_pred ((code), 0, b1, r2, disp, wh, ih) +#define ia64_mov_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih) +#define ia64_mov_ret_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih) +/* Pseudo ops */ + +#define ia64_mov_to_br(code, b1, r2) ia64_mov_to_br_pred ((code), 0, (b1), (r2)) +#define ia64_mov_ret_to_br(code, b1, r2) ia64_mov_ret_to_br_pred ((code), 0, (b1), (r2)) + +/* End of pseudo ops */ #define ia64_mov_from_br(code, r1, b2) ia64_mov_from_br_pred ((code), 0, r1, b2) @@ -1936,6 +2217,153 @@ typedef enum { #define ia64_ld4_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) #define ia64_ld8_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint) +/* Pseudo ops */ + +#define ia64_ld1(code, r1, r3) ia64_ld1_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2(code, r1, r3) ia64_ld2_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4(code, r1, r3) ia64_ld4_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8(code, r1, r3) ia64_ld8_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_s(code, r1, r3) ia64_ld1_s_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_s(code, r1, r3) ia64_ld2_s_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_s(code, r1, r3) ia64_ld4_s_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_s(code, r1, r3) ia64_ld8_s_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_a(code, r1, 
r3) ia64_ld1_a_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_a(code, r1, r3) ia64_ld2_a_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_a(code, r1, r3) ia64_ld4_a_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_a(code, r1, r3) ia64_ld8_a_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_sa(code, r1, r3) ia64_ld1_sa_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_sa(code, r1, r3) ia64_ld2_sa_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_sa(code, r1, r3) ia64_ld4_sa_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_sa(code, r1, r3) ia64_ld8_sa_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_bias(code, r1, r3) ia64_ld1_bias_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_bias(code, r1, r3) ia64_ld2_bias_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_bias(code, r1, r3) ia64_ld4_bias_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_bias(code, r1, r3) ia64_ld8_bias_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_acq(code, r1, r3) ia64_ld1_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_acq(code, r1, r3) ia64_ld2_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_acq(code, r1, r3) ia64_ld4_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_acq(code, r1, r3) ia64_ld8_acq_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld8_fill(code, r1, r3) ia64_ld8_fill_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_c_clr(code, r1, r3) ia64_ld1_c_clr_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_c_clr(code, r1, r3) ia64_ld2_c_clr_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_c_clr(code, r1, r3) ia64_ld4_c_clr_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_c_clr(code, r1, r3) ia64_ld8_c_clr_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_c_nc(code, r1, r3) ia64_ld1_c_nc_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_c_nc(code, r1, r3) ia64_ld2_c_nc_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_c_nc(code, r1, r3) ia64_ld4_c_nc_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_c_nc(code, r1, r3) ia64_ld8_c_nc_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_c_clr_acq(code, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld2_c_clr_acq(code, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld4_c_clr_acq(code, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld8_c_clr_acq(code, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld16(code, r1, r3) ia64_ld16_hint_pred (code, 0, r1, r3, 0) +#define ia64_ld16_acq(code, r1, r3) ia64_ld16_acq_hint_pred (code, 0, r1, r3, 0) + +#define ia64_ld1_inc(code, r1, r2, r3) ia64_ld1_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_inc(code, r1, r2, r3) ia64_ld2_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_inc(code, r1, r2, r3) ia64_ld4_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_inc(code, r1, r2, r3) ia64_ld8_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_s_inc(code, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_s_inc(code, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_s_inc(code, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_s_inc(code, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_a_inc(code, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_a_inc(code, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_a_inc(code, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_a_inc(code, 
r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_sa_inc(code, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_sa_inc(code, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_sa_inc(code, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_sa_inc(code, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_bias_inc(code, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_bias_inc(code, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_bias_inc(code, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_bias_inc(code, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_acq_inc(code, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_acq_inc(code, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_acq_inc(code, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_acq_inc(code, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld8_fill_inc(code, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_c_clr_inc(code, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_c_clr_inc(code, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_c_clr_inc(code, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_c_clr_inc(code, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_c_nc_inc(code, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_c_nc_inc(code, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_c_nc_inc(code, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_c_nc_inc(code, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_c_clr_acq_inc(code, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld2_c_clr_acq_inc(code, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld4_c_clr_acq_inc(code, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) +#define ia64_ld8_c_clr_acq_inc(code, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0) + +#define ia64_ld1_inc_imm(code, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_inc_imm(code, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_inc_imm(code, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_inc_imm(code, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_s_inc_imm(code, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_s_inc_imm(code, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_s_inc_imm(code, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_s_inc_imm(code, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_a_inc_imm(code, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_a_inc_imm(code, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, 0, r1, r3, 
imm, 0) +#define ia64_ld4_a_inc_imm(code, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_a_inc_imm(code, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_sa_inc_imm(code, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_sa_inc_imm(code, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_sa_inc_imm(code, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_sa_inc_imm(code, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_bias_inc_imm(code, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_bias_inc_imm(code, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_bias_inc_imm(code, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_bias_inc_imm(code, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_acq_inc_imm(code, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_acq_inc_imm(code, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_acq_inc_imm(code, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_acq_inc_imm(code, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld8_fill_inc_imm(code, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_c_clr_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_c_clr_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_c_clr_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_c_clr_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_c_nc_inc_imm(code, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_c_nc_inc_imm(code, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_c_nc_inc_imm(code, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_c_nc_inc_imm(code, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +#define ia64_ld1_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld2_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld4_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) +#define ia64_ld8_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0) + +/* End of pseudo ops */ #define ia64_st1_hint(code, r3, r2, hint) ia64_st1_hint_pred ((code), 0, r3, r2, hint) #define ia64_st2_hint(code, r3, r2, hint) ia64_st2_hint_pred ((code), 0, r3, r2, hint) @@ -1952,12 +2380,6 @@ typedef enum { #define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint) #define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint) -/* Pseudo ops */ -#define ia64_ld1(code, r1, r3) ia64_ld1_hint ((code), (r1), (r3), 0) -#define ia64_ld2(code, r1, r3) ia64_ld2_hint ((code), (r1), (r3), 0) -#define 
ia64_ld4(code, r1, r3) ia64_ld4_hint ((code), (r1), (r3), 0) -#define ia64_ld8(code, r1, r3) ia64_ld8_hint ((code), (r1), (r3), 0) - #define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) #define ia64_st2_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) #define ia64_st4_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint) @@ -2069,6 +2491,105 @@ typedef enum { #define ia64_ldf_fill_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf_fill_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint) +/* Pseudo ops */ + +#define ia64_ldfs(code, f1, r3) ia64_ldfs_pred (code, 0, f1, r3) +#define ia64_ldfd(code, f1, r3) ia64_ldfd_pred (code, 0, f1, r3) +#define ia64_ldf8(code, f1, r3) ia64_ldf8_pred (code, 0, f1, r3) +#define ia64_ldfe(code, f1, r3) ia64_ldfe_pred (code, 0, f1, r3) + +#define ia64_ldfs_s(code, f1, r3) ia64_ldfs_s_pred (code, 0, f1, r3) +#define ia64_ldfd_s(code, f1, r3) ia64_ldfd_s_pred (code, 0, f1, r3) +#define ia64_ldf8_s(code, f1, r3) ia64_ldf8_s_pred (code, 0, f1, r3) +#define ia64_ldfe_s(code, f1, r3) ia64_ldfe_s_pred (code, 0, f1, r3) + +#define ia64_ldfs_a(code, f1, r3) ia64_ldfs_a_pred (code, 0, f1, r3) +#define ia64_ldfd_a(code, f1, r3) ia64_ldfd_a_pred (code, 0, f1, r3) +#define ia64_ldf8_a(code, f1, r3) ia64_ldf8_a_pred (code, 0, f1, r3) +#define ia64_ldfe_a(code, f1, r3) ia64_ldfe_a_pred (code, 0, f1, r3) + +#define ia64_ldfs_sa(code, f1, r3) ia64_ldfs_sa_pred (code, 0, f1, r3) +#define ia64_ldfd_sa(code, f1, r3) ia64_ldfd_sa_pred (code, 0, f1, r3) +#define ia64_ldf8_sa(code, f1, r3) ia64_ldf8_sa_pred (code, 0, f1, r3) +#define ia64_ldfe_sa(code, f1, r3) ia64_ldfe_sa_pred (code, 0, f1, r3) + +#define ia64_ldfs_c_clr(code, f1, r3) ia64_ldfs_c_clr_pred (code, 0, f1, r3) +#define ia64_ldfd_c_clr(code, f1, r3) ia64_ldfd_c_clr_pred (code, 0, f1, r3) +#define ia64_ldf8_c_clr(code, f1, r3) ia64_ldf8_c_clr_pred (code, 0, f1, r3) +#define ia64_ldfe_c_clr(code, f1, r3) ia64_ldfe_c_clr_pred (code, 0, f1, r3) + +#define ia64_ldfs_c_nc(code, f1, r3) ia64_ldfs_c_nc_pred (code, 0, f1, r3) +#define ia64_ldfd_c_nc(code, f1, r3) ia64_ldfd_c_nc_pred (code, 0, f1, r3) +#define ia64_ldf8_c_nc(code, f1, r3) ia64_ldf8_c_nc_pred (code, 0, f1, r3) +#define ia64_ldfe_c_nc(code, f1, r3) ia64_ldfe_c_nc_pred (code, 0, f1, r3) + +#define ia64_ldf_fill(code, f1, r3) ia64_ldf_fill_pred (code, 0, f1, r3) + +#define ia64_ldfs_inc(code, f1, r3, r2) ia64_ldfs_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_inc(code, f1, r3, r2) ia64_ldfd_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_inc(code, f1, r3, r2) ia64_ldf8_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_inc(code, f1, r3, r2) ia64_ldfe_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_s_inc(code, f1, r3, r2) ia64_ldfs_s_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_s_inc(code, f1, r3, r2) ia64_ldfd_s_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_s_inc(code, f1, r3, r2) ia64_ldf8_s_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_s_inc(code, f1, r3, r2) ia64_ldfe_s_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_a_inc(code, f1, r3, r2) ia64_ldfs_a_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_a_inc(code, f1, r3, r2) ia64_ldfd_a_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_a_inc(code, f1, r3, r2) ia64_ldf8_a_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_a_inc(code, f1, r3, r2) ia64_ldfe_a_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_sa_inc(code, f1, r3, r2) 
ia64_ldfs_sa_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_sa_inc(code, f1, r3, r2) ia64_ldfd_sa_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_sa_inc(code, f1, r3, r2) ia64_ldf8_sa_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_sa_inc(code, f1, r3, r2) ia64_ldfe_sa_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_c_clr_inc(code, f1, r3, r2) ia64_ldfs_c_clr_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_c_clr_inc(code, f1, r3, r2) ia64_ldfd_c_clr_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_c_clr_inc(code, f1, r3, r2) ia64_ldf8_c_clr_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_c_clr_inc(code, f1, r3, r2) ia64_ldfe_c_clr_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_c_nc_inc(code, f1, r3, r2) ia64_ldfs_c_nc_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfd_c_nc_inc(code, f1, r3, r2) ia64_ldfd_c_nc_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldf8_c_nc_inc(code, f1, r3, r2) ia64_ldf8_c_nc_inc_pred (code, 0, f1, r3, r2) +#define ia64_ldfe_c_nc_inc(code, f1, r3, r2) ia64_ldfe_c_nc_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldf_fill_inc(code, f1, r3, r2) ia64_ldf_fill_inc_pred (code, 0, f1, r3, r2) + +#define ia64_ldfs_inc_imm(code, f1, r3, imm) ia64_ldfs_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_inc_imm(code, f1, r3, imm) ia64_ldfd_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_inc_imm(code, f1, r3, imm) ia64_ldf8_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_inc_imm(code, f1, r3, imm) ia64_ldfe_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldfs_s_inc_imm(code, f1, r3, imm) ia64_ldfs_s_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_s_inc_imm(code, f1, r3, imm) ia64_ldfd_s_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_s_inc_imm(code, f1, r3, imm) ia64_ldf8_s_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_s_inc_imm(code, f1, r3, imm) ia64_ldfe_s_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldfs_a_inc_imm(code, f1, r3, imm) ia64_ldfs_a_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_a_inc_imm(code, f1, r3, imm) ia64_ldfd_a_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_a_inc_imm(code, f1, r3, imm) ia64_ldf8_a_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_a_inc_imm(code, f1, r3, imm) ia64_ldfe_a_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldfs_sa_inc_imm(code, f1, r3, imm) ia64_ldfs_sa_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_sa_inc_imm(code, f1, r3, imm) ia64_ldfd_sa_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_sa_inc_imm(code, f1, r3, imm) ia64_ldf8_sa_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_sa_inc_imm(code, f1, r3, imm) ia64_ldfe_sa_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldfs_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_c_clr_inc_imm(code, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldfs_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfd_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldf8_c_nc_inc_imm(code, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_pred (code, 0, f1, r3, imm) +#define ia64_ldfe_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_pred (code, 0, f1, r3, imm) + +#define ia64_ldf_fill_inc_imm(code, f1, 
r3, imm) ia64_ldf_fill_inc_imm_pred (code, 0, f1, r3, imm) + +/* End of pseudo ops */ #define ia64_stfs_hint(code, r3, f2, hint) ia64_stfs_hint_pred ((code), 0, r3, f2, hint) #define ia64_stfd_hint(code, r3, f2, hint) ia64_stfd_hint_pred ((code), 0, r3, f2, hint) @@ -2270,7 +2791,25 @@ typedef enum { #define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh) -#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_hint ((code), (b1), (b2), 0, 0, 0) +/* Pseudo ops */ + +#define ia64_br_cond(code, disp) ia64_br_cond_pred (code, 0, disp) +#define ia64_br_wexit(code, disp) ia64_br_wexit_pred (code, 0, disp) +#define ia64_br_wtop(code, disp) ia64_br_wtop_pred (code, 0, disp) + +#define ia64_br_cloop(code, disp) ia64_br_cloop_pred (code, 0, disp) +#define ia64_br_cexit(code, disp) ia64_br_cexit_pred (code, 0, disp) +#define ia64_br_ctop(code, disp) ia64_br_ctop_pred (code, 0, disp) + +#define ia64_br_call(code, b1, disp) ia64_br_call_pred (code, 0, b1, disp) + +#define ia64_br_cond_reg(code, b1) ia64_br_cond_reg_pred (code, 0, b1) +#define ia64_br_ia_reg(code, b1) ia64_br_ia_reg_pred (code, 0, b1) +#define ia64_br_ret_reg(code, b1) ia64_br_ret_reg_pred (code, 0, b1) + +#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_pred (code, 0, b1, b2) + +/* End of pseudo ops */ #define ia64_cover(code) ia64_cover_pred ((code), 0) #define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0) -- cgit v1.1 From 5f3ca7841b8aedd35f0c23781f2ac96f31ed501e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 30 May 2005 14:09:48 +0000 Subject: 2005-05-30 Zoltan Varga * ia64/codegen.c: Fix it after latest changes. svn path=/trunk/mono/; revision=45192 --- ChangeLog | 4 ++++ ia64/codegen.c | 10 +++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index d205010..6610eb9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-05-30 Zoltan Varga + + * ia64/codegen.c: Fix it after latest changes. + 2005-05-29 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/codegen.c b/ia64/codegen.c index 5e893e3..66398e1 100644 --- a/ia64/codegen.c +++ b/ia64/codegen.c @@ -314,11 +314,11 @@ main () ia64_chk_s_i (code, 1, -1); ia64_chk_s_i (code, 1, 1); - ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); - ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); - ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); - ia64_mov_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); - ia64_mov_ret_to_br (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); + ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); + ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); + ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); + ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); ia64_mov_from_br (code, 1, 1); -- cgit v1.1 From 5a9f032072053d76af233b9906614ee491d6295c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 9 Jun 2005 20:22:08 +0000 Subject: 2005-06-09 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
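A caller might drive the two modes roughly as below (a minimal sketch, not
part of the change itself; the register numbers and the choice of ld8 are
arbitrary):

	guint8 buf [1024];
	Ia64CodegenState code;

	ia64_codegen_init (code, buf);        /* automatic mode is the default */
	ia64_ld8 (code, 8, 9);                /* bundled and stopped automatically */

	ia64_codegen_set_automatic (code, 0);
	ia64_ld8 (code, 10, 9);               /* only buffered in non-automatic mode */
	ia64_stop (code);                     /* the caller places the stop */
	ia64_codegen_close (code);            /* flushes the pending bundle */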
svn path=/trunk/mono/; revision=45719 --- ChangeLog | 4 +++ ia64/ia64-codegen.h | 71 ++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 61 insertions(+), 14 deletions(-) diff --git a/ChangeLog b/ChangeLog index 6610eb9..c64a8c5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-06-09 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-05-30 Zoltan Varga * ia64/codegen.c: Fix it after latest changes. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 205301f..2152f5b 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -104,7 +104,10 @@ typedef enum { IA64_B4 = 4, IA64_B5 = 5, IA64_B6 = 6, - IA64_B7 = 7 + IA64_B7 = 7, + + /* Aliases */ + IA64_RP = IA64_B0 } Ia64BranchRegister; typedef enum { @@ -127,10 +130,12 @@ typedef enum { #define ia64_ins_b2(ins) ((((guint64)(ins)) >> 13) & 0x7) #define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) #define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1) +#define ia64_ins_x2a(ins) ((((guint64)(ins)) >> 34) & 0x3) #define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7) #define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f) #define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1) #define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1) +#define ia64_ins_ve(ins) ((((guint64)(ins)) >> 33) & 0x1) #define IA64_NOP_I ((0x01 << 27)) #define IA64_NOP_M ((0x01 << 27)) @@ -143,24 +148,33 @@ typedef enum { */ typedef struct { + gboolean automatic; guint8 *buf; guint64 instructions [3]; int itypes [3], stops [3]; - int nins; + int nins, template; } Ia64CodegenState; +#ifdef IA64_SIMPLE_EMIT_BUNDLE G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); +#else +void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); +#endif /* - * FIXME: - * - * In order to simplify things, we emit a stop after every instruction for - * now. Also, we emit 1 ins + 2 nops. + * There are two code generation modes: + * - in automatic mode, bundling and stops are handled automatically by the + * code generation macros. + * FIXME: In order to simplify things, we emit a stop after every instruction for + * now. Also, we emit 1 ins + 2 nops. + * - in non-automatic mode, the caller is responsible for handling bundling and + * stops using the appropriate macros. 
*/ #define ia64_codegen_init(code, codegen_buf) do { \ code.buf = codegen_buf; \ code.nins = 0; \ + code.automatic = 1; \ } while (0) #define ia64_codegen_close(code) do { \ @@ -171,15 +185,40 @@ G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flu ia64_emit_bundle (&code, TRUE); \ } while (0) +#define ia64_end_bundle(code) do { \ + ia64_emit_bundle (&code, TRUE); \ +} while (0) + +#define ia64_codegen_set_automatic(code, is_automatic) do { \ + code.automatic = (is_automatic); \ +} while (0) + +#define ia64_stop(code) do { \ + g_assert ((code.nins > 0)); \ + code.stops [code.nins - 1] = 1; \ +} while (0) + +#define ia64_begin_bundle_template(code, bundle_template) do { \ + ia64_emit_bundle (&code, TRUE); \ + code.template = (bundle_template); \ +} while (0) + /* To ease debugging, we emit instructions immediately */ #define ia64_emit_ins(code, itype, ins) do { \ - code.instructions [code.nins] = ins; \ - code.itypes [code.nins] = itype; \ - code.stops [code.nins] = 1; \ - code.nins ++; \ - if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); \ - if (code.nins == 3) \ - ia64_emit_bundle (&code, FALSE); \ + if (G_LIKELY (code.automatic)) { \ + code.instructions [code.nins] = ins; \ + code.itypes [code.nins] = itype; \ + code.stops [code.nins] = 1; \ + code.nins ++; \ + if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); \ + if (code.nins == 3) \ + ia64_emit_bundle (&code, FALSE); \ + } else { \ + g_assert (code.nins < 3); \ + code.instructions [code.nins] = ins; \ + code.itypes [code.nins] = itype; \ + code.nins ++; \ + } \ } while (0) #if G_BYTE_ORDER != G_LITTLE_ENDIAN @@ -196,6 +235,8 @@ G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flu (code)->buf += 16; \ } while (0) +#ifdef IA64_SIMPLE_EMIT_BUNDLE + G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) { @@ -230,6 +271,8 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) code->nins = 0; } +#endif /* IA64_SIMPLE_EMIT_BUNDLE */ + #define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127)) #define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) @@ -1711,7 +1754,7 @@ typedef enum { #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); if (code.automatic) ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -- cgit v1.1 From 398224a9101808c8ca470b24366a506eeefec135 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 12 Jun 2005 20:41:05 +0000 Subject: 2005-06-12 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
svn path=/trunk/mono/; revision=45834 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 5 +++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index c64a8c5..84445a7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-06-12 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + 2005-06-09 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 2152f5b..90c86c3 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -275,6 +275,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127)) #define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) +#define ia64_is_imm21(imm) (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1))) #if 1 @@ -307,8 +308,8 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define check_imm8(imm) check_assert (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127)) #define check_imm9(imm) check_assert (((gint64)(imm) >= -256) && ((gint64)(imm) <= 255)) #define check_imm14(imm) check_assert (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) -#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1))) -#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x400000) && ((gint64)(imm) <= (0x400000 - 1))) +#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1))) +#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1))) #define check_imm62(imm) check_assert (((gint64)(imm) >= -0x2fffffffffffffffLL) && ((gint64)(imm) <= (0x2fffffffffffffffLL - 1))) #define check_len4(len) check_assert (((gint64)(len) >= 1) && ((gint64)(len) <= 16)) -- cgit v1.1 From f51b94e34b1a887304ace96af27d51b4ec98ab4b Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 19 Jun 2005 20:18:07 +0000 Subject: 2005-06-19 Zoltan Varga * ia64/ia64-codegen.h: Fix encoding of ia64_fclass. svn path=/trunk/mono/; revision=46224 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 84445a7..97c66e0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-06-19 Zoltan Varga + + * ia64/ia64-codegen.h: Fix encoding of ia64_fclass. + 2005-06-12 Zoltan Varga * ia64/ia64-codegen.h: Ongoing IA64 work. 
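The fclass9 operand is nine bits, but the two encoded fields hold its high
seven and low two bits respectively; the old code split it the other way
around. Roughly, the fixed split computes (descriptive names only, not
identifiers from the header):

	fclass_hi = ((guint64)(fclass) >> 2) & 0x7f;  /* fclass{8:2}, ins bits 20-26 */
	fclass_lo = ((guint64)(fclass)) & 0x3;        /* fclass{1:0}, ins bits 33-34 */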
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 90c86c3..cf6581f 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1657,7 +1657,7 @@ typedef enum { #define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf)) #define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) -#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { check_fr ((f2)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (fclass) & 0x7f, 20, (p2), 27, (((guint64)(fclass)) >> 7) & 0x3, 33, (opcode), 37); } while (0) +#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { check_fr ((f2)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0) #define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0) #define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1) -- cgit v1.1 From 5a9a7537801ad68c0f8552e7e107994b793e93ac Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 22 Jun 2005 22:00:43 +0000 Subject: 2005-06-23 Zoltan Varga * ia64/ia64-codegen.h: Add some new pseudo ops. svn path=/trunk/mono/; revision=46401 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index 97c66e0..0d93f46 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-06-23 Zoltan Varga + + * ia64/ia64-codegen.h: Add some new pseudo ops. + 2005-06-19 Zoltan Varga * ia64/ia64-codegen.h: Fix encoding of ia64_fclass. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index cf6581f..c19272c 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1063,6 +1063,9 @@ typedef enum { #define ia64_st4_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x32) #define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33) +/* Pseudo ops */ +#define ia64_st8_pred(code, qp, r3, r2) ia64_st8_hint_pred ((code), (qp), (r3), (r2), 0) + #define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34) #define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35) #define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36) @@ -2414,6 +2417,9 @@ typedef enum { #define ia64_st4_hint(code, r3, r2, hint) ia64_st4_hint_pred ((code), 0, r3, r2, hint) #define ia64_st8_hint(code, r3, r2, hint) ia64_st8_hint_pred ((code), 0, r3, r2, hint) +/* Pseudo ops */ +#define ia64_st8(code, r3, r2) ia64_st8_hint ((code), (r3), (r2), 0) + #define ia64_st1_rel_hint(code, r3, r2, hint) ia64_st1_rel_hint_pred ((code), 0, r3, r2, hint) #define ia64_st2_rel_hint(code, r3, r2, hint) ia64_st2_rel_hint_pred ((code), 0, r3, r2, hint) #define ia64_st4_rel_hint(code, r3, r2, hint) ia64_st4_rel_hint_pred ((code), 0, r3, r2, hint) -- cgit v1.1 From 2205bab6932e69490e48b9e11957041e938020ee Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 18 Jul 2005 20:33:37 +0000 Subject: 2005-07-18 Zoltan Varga * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work. 
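The adds instruction takes a 14-bit signed immediate, so the new
ia64_is_adds_imm () predicate is simply an alias for ia64_is_imm14 ().
For example:

	g_assert (ia64_is_adds_imm (8191));   /* in range  */
	g_assert (!ia64_is_adds_imm (8192));  /* out of range */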
svn path=/trunk/mono/; revision=47395 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 0d93f46..d2a112b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-07-18 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work. + 2005-06-23 Zoltan Varga * ia64/ia64-codegen.h: Add some new pseudo ops. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index c19272c..14d70f2 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -166,7 +166,7 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); * - in automatic mode, bundling and stops are handled automatically by the * code generation macros. * FIXME: In order to simplify things, we emit a stop after every instruction for - * now. Also, we emit 1 ins + 2 nops. + * now. * - in non-automatic mode, the caller is responsible for handling bundling and * stops using the appropriate macros. */ @@ -277,6 +277,8 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191)) #define ia64_is_imm21(imm) (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1))) +#define ia64_is_adds_imm(imm) ia64_is_imm14((imm)) + #if 1 #define check_assert(cond) g_assert((cond)) -- cgit v1.1 From 0fb75c64cb1361cc81a4e47ca556a597b440d65a Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 20 Jul 2005 16:55:20 +0000 Subject: Wed Jul 20 18:01:54 BST 2005 Paolo Molaro * arm/*: more codegen macros. svn path=/trunk/mono/; revision=47473 --- ChangeLog | 5 + arm/Makefile.am | 8 +- arm/arm-codegen.h | 2 + arm/arm-fpa-codegen.h | 193 +++++++++++++++++++++++ arm/arm_fpamacros.h | 419 ++++++++++++++++++++++++++++++++++++++++++++++++++ arm/fpa_macros.th | 15 ++ arm/fpam_macros.th | 14 ++ arm/fpaops.sh | 26 ++++ 8 files changed, 680 insertions(+), 2 deletions(-) create mode 100644 arm/arm-fpa-codegen.h create mode 100644 arm/arm_fpamacros.h create mode 100644 arm/fpa_macros.th create mode 100644 arm/fpam_macros.th create mode 100755 arm/fpaops.sh diff --git a/ChangeLog b/ChangeLog index d2a112b..397c2fb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Wed Jul 20 18:01:54 BST 2005 Paolo Molaro + + * arm/*: more codegen macros. + 2005-07-18 Zoltan Varga * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work. 
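The new FPA macros are generated: fpaops.sh substitutes each opcode name
into fpa_macros.th (dyadic ops) and fpam_macros.th (monadic ops), so every
op gets single/double and conditional/unconditional variants. A rough
usage sketch of the generated interface (assuming the usual emission
pointer p from arm-codegen.h):

	ARM_FPA_ADFD (p, ARM_FPA_F0, ARM_FPA_F1, ARM_FPA_F2); /* f0 := f1 ADF f2, double */
	ARM_MVFS (p, ARM_FPA_F3, ARM_FPA_F0);                 /* f3 := MVF f0, single */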
diff --git a/arm/Makefile.am b/arm/Makefile.am index b245bcd..f9a80d4 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -3,7 +3,7 @@ INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-arm.la -BUILT_SOURCES = arm_dpimacros.h +BUILT_SOURCES = arm_dpimacros.h arm_fpamacros.h libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ @@ -16,7 +16,11 @@ libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th bash $(srcdir)/dpiops.sh +arm_fpamacros.h: fpaops.sh fpam_macros.th fpa_macros.th + bash $(srcdir)/fpaops.sh + CLEANFILES = $(BUILT_SOURCES) -EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th +EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th fpam_macros.th fpa_macros.th arm-fpa-codegen.h fpaops.sh + diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 61302a4..1d0e6e2 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -443,6 +443,8 @@ typedef struct { ARM_DEF_COND(cond) +#define ARM_LDMIA(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL)) +#define ARM_STMIA(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL)) /* stmdb sp!, {regs} */ #define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL)) diff --git a/arm/arm-fpa-codegen.h b/arm/arm-fpa-codegen.h new file mode 100644 index 0000000..7db2764 --- /dev/null +++ b/arm/arm-fpa-codegen.h @@ -0,0 +1,193 @@ +#ifndef __MONO_ARM_FPA_CODEGEN_H__ +#define __MONO_ARM_FPA_CODEGEN_H__ + +#include "arm-codegen.h" + +enum { + /* FPA registers */ + ARM_FPA_F0, + ARM_FPA_F1, + ARM_FPA_F2, + ARM_FPA_F3, + ARM_FPA_F4, + ARM_FPA_F5, + ARM_FPA_F6, + ARM_FPA_F7, + + /* transfer length for LDF/STF (T0/T1), already shifted */ + ARM_FPA_SINGLE = 0, + ARM_FPA_DOUBLE = 1 << 15, + + ARM_FPA_ADF = 0 << 20, + ARM_FPA_MUF = 1 << 20, + ARM_FPA_SUF = 2 << 20, + ARM_FPA_RSF = 3 << 20, + ARM_FPA_DVF = 4 << 20, + ARM_FPA_RDF = 5 << 20, + ARM_FPA_POW = 6 << 20, + ARM_FPA_RPW = 7 << 20, + ARM_FPA_RMF = 8 << 20, + ARM_FPA_FML = 9 << 20, + ARM_FPA_FDV = 10 << 20, + ARM_FPA_FRD = 11 << 20, + ARM_FPA_POL = 12 << 20, + + /* monadic */ + ARM_FPA_MVF = (0 << 20) | (1 << 15), + ARM_FPA_MNF = (1 << 20) | (1 << 15), + ARM_FPA_ABS = (2 << 20) | (1 << 15), + ARM_FPA_RND = (3 << 20) | (1 << 15), + ARM_FPA_SQT = (4 << 20) | (1 << 15), + ARM_FPA_LOG = (5 << 20) | (1 << 15), + ARM_FPA_LGN = (6 << 20) | (1 << 15), + ARM_FPA_EXP = (7 << 20) | (1 << 15), + ARM_FPA_SIN = (8 << 20) | (1 << 15), + ARM_FPA_COS = (9 << 20) | (1 << 15), + ARM_FPA_TAN = (10 << 20) | (1 << 15), + ARM_FPA_ASN = (11 << 20) | (1 << 15), + ARM_FPA_ACS = (12 << 20) | (1 << 15), + ARM_FPA_ATN = (13 << 20) | (1 << 15), + ARM_FPA_URD = (14 << 20) | (1 << 15), + ARM_FPA_NRM = (15 << 20) | (1 << 15), + + /* round modes */ + ARM_FPA_ROUND_NEAREST = 0, + ARM_FPA_ROUND_PINF = 1, + ARM_FPA_ROUND_MINF = 2, + ARM_FPA_ROUND_ZERO = 3, + + /* round precision */ + ARM_FPA_ROUND_SINGLE = 0, + ARM_FPA_ROUND_DOUBLE = 1, + + /* constants */ + ARM_FPA_CONST_0 = 8, + ARM_FPA_CONST_1_0 = 9, + ARM_FPA_CONST_2_0 = 10, + ARM_FPA_CONST_3_0 = 11, + ARM_FPA_CONST_4_0 = 12, + ARM_FPA_CONST_5_0 = 13, + ARM_FPA_CONST_0_5 = 14, + ARM_FPA_CONST_10 = 15, + + /* compares */ + ARM_FPA_CMF = 4, + ARM_FPA_CNF = 5, + ARM_FPA_CMFE = 6, + ARM_FPA_CNFE = 7, + + /* CPRT ops */ + ARM_FPA_FLT = 0, + ARM_FPA_FIX = 1, + ARM_FPA_WFS = 2, + ARM_FPA_RFS = 3, + ARM_FPA_WFC = 4, + ARM_FPA_RFC = 5 +}; + +#define 
ARM_DEF_FPA_LDF_STF(cond,post,ls,fptype,wback,basereg,fdreg,offset) \
+	((offset) >= 0? (offset)>>2: -(offset)>>2) | \
+	((1 << 8) | (fptype)) | \
+	((fdreg) << 12) | \
+	((basereg) << 16) | \
+	((ls) << 20) | \
+	((wback) << 21) | \
+	(((offset) >= 0) << 23) | \
+	((post) << 24) | \
+	(6 << 25) | \
+	ARM_DEF_COND(cond)
+
+/* FP load and stores */
+#define ARM_LDFS_COND(p,freg,base,offset,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+#define ARM_LDFS(p,freg,base,offset) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+
+#define ARM_LDFD_COND(p,freg,base,offset,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+#define ARM_LDFD(p,freg,base,offset) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+
+#define ARM_STFS_COND(p,freg,base,offset,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+#define ARM_STFS(p,freg,base,offset) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+
+#define ARM_STFD_COND(p,freg,base,offset,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+#define ARM_STFD(p,freg,base,offset) \
+	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+
+#define ARM_DEF_FPA_CPDO_MONADIC(cond,op,dreg,sreg,round,prec) \
+	(1 << 8) | (14 << 24) | \
+	(op) | \
+	((sreg) << 0) | \
+	((round) << 5) | \
+	((dreg) << 12) | \
+	((prec) << 7) | \
+	ARM_DEF_COND(cond)
+
+#define ARM_DEF_FPA_CPDO_DYADIC(cond,op,dreg,sreg1,sreg2,round,prec) \
+	(1 << 8) | (14 << 24) | \
+	(op) | \
+	((sreg1) << 16) | \
+	((sreg2) << 0) | \
+	((round) << 5) | \
+	((dreg) << 12) | \
+	((prec) << 7) | \
+	ARM_DEF_COND(cond)
+
+#define ARM_DEF_FPA_CMP(cond,op,sreg1,sreg2) \
+	(1 << 4) | (1 << 8) | (15 << 12) | \
+	(1 << 20) | (14 << 24) | \
+	(op) << 21 | \
+	(sreg1) << 16 | \
+	(sreg2) | \
+	ARM_DEF_COND(cond)
+
+#define ARM_DEF_FPA_CPRT(cond,op,fn,fm,rd,ftype,round) \
+	(1 << 4) | (1 << 8) | (14 << 24) | \
+	(op) << 20 | \
+	(fm) | \
+	(fn) << 16 | \
+	(rd) << 12 | \
+	((round) << 5) | \
+	((ftype) << 7) | \
+	ARM_DEF_COND(cond)
+
+
+#include "arm_fpamacros.h"
+
+#define ARM_RNDDZ_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_ZERO,ARM_FPA_ROUND_DOUBLE))
+#define ARM_RNDDZ(p,dreg,sreg) ARM_RNDDZ_COND(p,dreg,sreg,ARMCOND_AL)
+
+/* compares */
+#define ARM_FCMP_COND(p,op,sreg1,sreg2,cond) \
+	ARM_EMIT(p, ARM_DEF_FPA_CMP(cond,op,sreg1,sreg2))
+#define ARM_FCMP(p,op,sreg1,sreg2) ARM_FCMP_COND(p,op,sreg1,sreg2,ARMCOND_AL)
+
+/* coprocessor register transfer */
+#define ARM_FLTD(p,fn,rd) \
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_DOUBLE,ARM_FPA_ROUND_NEAREST))
+#define ARM_FLTS(p,fn,rd) \
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_SINGLE,ARM_FPA_ROUND_NEAREST))
+
+#define ARM_FIXZ(p,rd,fm) \
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FIX,0,(fm),(rd),0,ARM_FPA_ROUND_NEAREST))
+
+#define ARM_WFS(p,rd) \
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST))
+
+#define ARM_RFS(p,rd) \
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST))
+
+#define ARM_WFC(p,rd) \
+ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) + +#define ARM_RFC(p,rd) \ + ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) + +#endif /* __MONO_ARM_FPA_CODEGEN_H__ */ + diff --git a/arm/arm_fpamacros.h b/arm/arm_fpamacros.h new file mode 100644 index 0000000..5de16f2 --- /dev/null +++ b/arm/arm_fpamacros.h @@ -0,0 +1,419 @@ +/* Macros for FPA ops, auto-generated from template */ + + +/* dyadic */ + +/* -- ADF -- */ + + +/* Fd := Rn ADF Rm */ +#define ARM_FPA_ADFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_ADF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_ADFD(p, rd, rn, rm) \ + ARM_FPA_ADFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_ADFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_ADF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_ADFS(p, rd, rn, rm) \ + ARM_FPA_ADFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- MUF -- */ + + +/* Fd := Rn MUF Rm */ +#define ARM_FPA_MUFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_MUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_MUFD(p, rd, rn, rm) \ + ARM_FPA_MUFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_MUFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_MUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_MUFS(p, rd, rn, rm) \ + ARM_FPA_MUFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- SUF -- */ + + +/* Fd := Rn SUF Rm */ +#define ARM_FPA_SUFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_SUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_SUFD(p, rd, rn, rm) \ + ARM_FPA_SUFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_SUFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_SUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_SUFS(p, rd, rn, rm) \ + ARM_FPA_SUFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- RSF -- */ + + +/* Fd := Rn RSF Rm */ +#define ARM_FPA_RSFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RSF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_RSFD(p, rd, rn, rm) \ + ARM_FPA_RSFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_RSFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RSF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_RSFS(p, rd, rn, rm) \ + ARM_FPA_RSFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- DVF -- */ + + +/* Fd := Rn DVF Rm */ +#define ARM_FPA_DVFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_DVF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_DVFD(p, rd, rn, rm) \ + ARM_FPA_DVFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_DVFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_DVF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_DVFS(p, rd, rn, rm) \ + ARM_FPA_DVFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- RDF -- */ + + +/* Fd := Rn RDF Rm */ +#define ARM_FPA_RDFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RDF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_RDFD(p, rd, rn, rm) \ + ARM_FPA_RDFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_RDFS_COND(p, rd, rn, rm, cond) \ + 
ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RDF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_RDFS(p, rd, rn, rm) \ + ARM_FPA_RDFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- POW -- */ + + +/* Fd := Rn POW Rm */ +#define ARM_FPA_POWD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_POWD(p, rd, rn, rm) \ + ARM_FPA_POWD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_POWS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_POWS(p, rd, rn, rm) \ + ARM_FPA_POWS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- RPW -- */ + + +/* Fd := Rn RPW Rm */ +#define ARM_FPA_RPWD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RPW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_RPWD(p, rd, rn, rm) \ + ARM_FPA_RPWD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_RPWS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RPW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_RPWS(p, rd, rn, rm) \ + ARM_FPA_RPWS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- RMF -- */ + + +/* Fd := Rn RMF Rm */ +#define ARM_FPA_RMFD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RMF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_RMFD(p, rd, rn, rm) \ + ARM_FPA_RMFD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_RMFS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RMF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_RMFS(p, rd, rn, rm) \ + ARM_FPA_RMFS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- FML -- */ + + +/* Fd := Rn FML Rm */ +#define ARM_FPA_FMLD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FML,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_FMLD(p, rd, rn, rm) \ + ARM_FPA_FMLD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_FMLS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FML,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_FMLS(p, rd, rn, rm) \ + ARM_FPA_FMLS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- FDV -- */ + + +/* Fd := Rn FDV Rm */ +#define ARM_FPA_FDVD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FDV,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_FDVD(p, rd, rn, rm) \ + ARM_FPA_FDVD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_FDVS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FDV,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_FDVS(p, rd, rn, rm) \ + ARM_FPA_FDVS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- FRD -- */ + + +/* Fd := Rn FRD Rm */ +#define ARM_FPA_FRDD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FRD,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_FRDD(p, rd, rn, rm) \ + ARM_FPA_FRDD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_FRDS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FRD,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_FRDS(p, rd, rn, rm) \ + ARM_FPA_FRDS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- POL -- */ + + +/* Fd := Rn POL Rm */ +#define ARM_FPA_POLD_COND(p, rd, rn, rm, cond) 
\ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POL,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_FPA_POLD(p, rd, rn, rm) \ + ARM_FPA_POLD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_FPA_POLS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POL,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_FPA_POLS(p, rd, rn, rm) \ + ARM_FPA_POLS_COND(p, rd, rn, rm, ARMCOND_AL) + + + +/* monadic */ + +/* -- MVF -- */ + + +/* Fd := MVF Rm */ + +#define ARM_MVFD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MVF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_MVFD(p,dreg,sreg) ARM_MVFD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_MVFS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MVF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_MVFS(p,dreg,sreg) ARM_MVFS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- MNF -- */ + + +/* Fd := MNF Rm */ + +#define ARM_MNFD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MNF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_MNFD(p,dreg,sreg) ARM_MNFD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_MNFS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MNF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_MNFS(p,dreg,sreg) ARM_MNFS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- ABS -- */ + + +/* Fd := ABS Rm */ + +#define ARM_ABSD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ABS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_ABSD(p,dreg,sreg) ARM_ABSD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_ABSS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ABS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_ABSS(p,dreg,sreg) ARM_ABSS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- RND -- */ + + +/* Fd := RND Rm */ + +#define ARM_RNDD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_RNDD(p,dreg,sreg) ARM_RNDD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_RNDS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_RNDS(p,dreg,sreg) ARM_RNDS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- SQT -- */ + + +/* Fd := SQT Rm */ + +#define ARM_SQTD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SQT,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_SQTD(p,dreg,sreg) ARM_SQTD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_SQTS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SQT,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_SQTS(p,dreg,sreg) ARM_SQTS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- LOG -- */ + + +/* Fd := LOG Rm */ + +#define ARM_LOGD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_LOG,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_LOGD(p,dreg,sreg) ARM_LOGD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_LOGS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_LOG,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_LOGS(p,dreg,sreg) ARM_LOGS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- EXP -- */ 
+ + +/* Fd := EXP Rm */ + +#define ARM_EXPD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_EXP,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_EXPD(p,dreg,sreg) ARM_EXPD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_EXPS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_EXP,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_EXPS(p,dreg,sreg) ARM_EXPS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- SIN -- */ + + +/* Fd := SIN Rm */ + +#define ARM_SIND_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SIN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_SIND(p,dreg,sreg) ARM_SIND_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_SINS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SIN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_SINS(p,dreg,sreg) ARM_SINS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- COS -- */ + + +/* Fd := COS Rm */ + +#define ARM_COSD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_COS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_COSD(p,dreg,sreg) ARM_COSD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_COSS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_COS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_COSS(p,dreg,sreg) ARM_COSS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- TAN -- */ + + +/* Fd := TAN Rm */ + +#define ARM_TAND_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_TAN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_TAND(p,dreg,sreg) ARM_TAND_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_TANS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_TAN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_TANS(p,dreg,sreg) ARM_TANS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- ASN -- */ + + +/* Fd := ASN Rm */ + +#define ARM_ASND_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ASN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_ASND(p,dreg,sreg) ARM_ASND_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_ASNS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ASN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_ASNS(p,dreg,sreg) ARM_ASNS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- ACS -- */ + + +/* Fd := ACS Rm */ + +#define ARM_ACSD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ACS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_ACSD(p,dreg,sreg) ARM_ACSD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_ACSS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ACS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_ACSS(p,dreg,sreg) ARM_ACSS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- ATN -- */ + + +/* Fd := ATN Rm */ + +#define ARM_ATND_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ATN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) +#define ARM_ATND(p,dreg,sreg) ARM_ATND_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_ATNS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ATN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) +#define ARM_ATNS(p,dreg,sreg) 
ARM_ATNS_COND(p,dreg,sreg,ARMCOND_AL)
+
+
+/* -- URD -- */
+
+
+/* Fd := URD Rm */
+
+#define ARM_URDD_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_URD,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE))
+#define ARM_URDD(p,dreg,sreg) ARM_URDD_COND(p,dreg,sreg,ARMCOND_AL)
+
+#define ARM_URDS_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_URD,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE))
+#define ARM_URDS(p,dreg,sreg) ARM_URDS_COND(p,dreg,sreg,ARMCOND_AL)
+
+
+/* -- NRM -- */
+
+
+/* Fd := NRM Rm */
+
+#define ARM_NRMD_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_NRM,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE))
+#define ARM_NRMD(p,dreg,sreg) ARM_NRMD_COND(p,dreg,sreg,ARMCOND_AL)
+
+#define ARM_NRMS_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_NRM,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE))
+#define ARM_NRMS(p,dreg,sreg) ARM_NRMS_COND(p,dreg,sreg,ARMCOND_AL)
+
+
+
+
+
+
+/* end generated */
+
diff --git a/arm/fpa_macros.th b/arm/fpa_macros.th
new file mode 100644
index 0000000..036b2a0
--- /dev/null
+++ b/arm/fpa_macros.th
@@ -0,0 +1,15 @@
+/* -- <Op> -- */
+
+
+/* Fd := Rn <Op> Rm */
+#define ARM_FPA_<Op>D_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_<Op>,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE))
+#define ARM_FPA_<Op>D(p, rd, rn, rm) \
+	ARM_FPA_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#define ARM_FPA_<Op>S_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_<Op>,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE))
+#define ARM_FPA_<Op>S(p, rd, rn, rm) \
+	ARM_FPA_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
diff --git a/arm/fpam_macros.th b/arm/fpam_macros.th
new file mode 100644
index 0000000..914105e
--- /dev/null
+++ b/arm/fpam_macros.th
@@ -0,0 +1,14 @@
+/* -- <Op> -- */
+
+
+/* Fd := <Op> Rm */
+
+#define ARM_<Op>D_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_<Op>,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE))
+#define ARM_<Op>D(p,dreg,sreg) ARM_<Op>D_COND(p,dreg,sreg,ARMCOND_AL)
+
+#define ARM_<Op>S_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_<Op>,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE))
+#define ARM_<Op>S(p,dreg,sreg) ARM_<Op>S_COND(p,dreg,sreg,ARMCOND_AL)
+
+
diff --git a/arm/fpaops.sh b/arm/fpaops.sh
new file mode 100755
index 0000000..108e2bc
--- /dev/null
+++ b/arm/fpaops.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+DYADIC="ADF MUF SUF RSF DVF RDF POW RPW RMF FML FDV FRD POL"
+MONADIC="MVF MNF ABS RND SQT LOG EXP SIN COS TAN ASN ACS ATN URD NRM"
+OUTFILE=arm_fpamacros.h
+
+# $1: opcode list
+# $2: template
+function gen() {
+	for i in $1; do
+		sed "s/<Op>/$i/g" $2.th >> $OUTFILE
+	done
+}
+
+echo -e "/* Macros for FPA ops, auto-generated from template */\n" > $OUTFILE
+
+echo -e "\n/* dyadic */\n" >> $OUTFILE
+gen "$DYADIC" fpa_macros
+
+echo -e "\n/* monadic */\n" >> $OUTFILE
+gen "$MONADIC" fpam_macros
+
+echo -e "\n\n" >> $OUTFILE
+
+echo -e "\n/* end generated */\n" >> $OUTFILE
+
-- cgit v1.1
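The script above stamps each opcode in DYADIC and MONADIC into the <Op> slots of the two templates, producing the macro families listed at the top of this patch (ARM_FPA_ADFD, ARM_MVFD, and so on). A minimal usage sketch in C, assuming only what the templates imply, namely that ARM_EMIT((p), ins) stores one 32-bit opcode at p and advances it; the function name, buffer, and register numbers below are illustrative, not part of the patch:

#include "arm-fpa-codegen.h"

/* Emit a small FPA sequence into a code buffer and return the
 * advanced write pointer.  FPA registers are plain small integers. */
static unsigned char *
emit_fpa_sample (unsigned char *p)
{
	ARM_FPA_ADFD (p, 0, 1, 2);	/* f0 := f1 + f2, double precision */
	ARM_ABSD (p, 3, 0);		/* f3 := ABS f0 */
	ARM_FPA_MUFS_COND (p, 4, 3, 3, ARMCOND_AL);	/* f4 := f3 * f3, single, explicit condition */
	return p;
}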
From 8348805e278d70da207455a0fe5cd470b00f3d8d Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Sat, 30 Jul 2005 15:43:43 +0000
Subject: 2005-07-30 Zoltan Varga

	* ia64/ia64-codegen.h: Ongoing IA64 work.

svn path=/trunk/mono/; revision=47855
---
 ChangeLog           |  3 +++
 ia64/ia64-codegen.h | 23 ++++++++++++++++-------
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 397c2fb..6ab3ec5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,6 @@
+2005-07-30 Zoltan Varga
+
+	* ia64/ia64-codegen.h: Ongoing IA64 work.

 Wed Jul 20 18:01:54 BST 2005 Paolo Molaro

diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h
index 14d70f2..be254c0 100644
--- a/ia64/ia64-codegen.h
+++ b/ia64/ia64-codegen.h
@@ -148,8 +148,9 @@ typedef enum {
  */

 typedef struct {
-	gboolean automatic;
 	guint8 *buf;
+	guint automatic : 1;
+	guint one_ins_per_bundle : 1;
 	guint64 instructions [3];
 	int itypes [3], stops [3];
 	int nins, template;
@@ -175,6 +176,7 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
 	code.buf = codegen_buf; \
 	code.nins = 0; \
 	code.automatic = 1; \
+	code.one_ins_per_bundle = 0; \
 } while (0)

 #define ia64_codegen_close(code) do { \
@@ -185,14 +187,15 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
 	ia64_emit_bundle (&code, TRUE); \
 } while (0)

-#define ia64_end_bundle(code) do { \
-	ia64_emit_bundle (&code, TRUE); \
-} while (0)
-
 #define ia64_codegen_set_automatic(code, is_automatic) do { \
 	code.automatic = (is_automatic); \
 } while (0)

+#define ia64_codegen_set_one_ins_per_bundle(code, is_one) do { \
+	ia64_begin_bundle (code); \
+	code.one_ins_per_bundle = (is_one); \
+} while (0)
+
 #define ia64_stop(code) do { \
 	g_assert ((code.nins > 0)); \
 	code.stops [code.nins - 1] = 1; \
@@ -203,14 +206,20 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
 	code.template = (bundle_template); \
 } while (0)

-/* To ease debugging, we emit instructions immediately */
+#if 0
+/* To ease debugging, emit instructions immediately */
+#define EMIT_BUNDLE(itype, code) if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#else
+#define EMIT_BUNDLE(itype, code) if ((itype == IA64_INS_TYPE_LX) && (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#endif
+
 #define ia64_emit_ins(code, itype, ins) do { \
 	if (G_LIKELY (code.automatic)) { \
 		code.instructions [code.nins] = ins; \
 		code.itypes [code.nins] = itype; \
 		code.stops [code.nins] = 1; \
 		code.nins ++; \
-		if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); \
+		EMIT_BUNDLE (itype, code); \
 		if (code.nins == 3) \
 			ia64_emit_bundle (&code, FALSE); \
 	} else { \
-- cgit v1.1
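The Ia64CodegenState changes in the commit above keep up to a bundle's worth of instructions buffered and flush them from ia64_emit_bundle; setting one_ins_per_bundle trades code density for output that is easier to single-step. A hedged sketch of the intended driver sequence follows; ia64_codegen_init is inferred from the codegen_buf parameter visible in the hunk and may be named differently in the full header, and the register numbers are arbitrary:

static guint8 *
emit_ia64_sample (guint8 *buf)
{
	Ia64CodegenState code;

	ia64_codegen_init (code, buf);			/* assumed opener, see note above */
	ia64_codegen_set_one_ins_per_bundle (code, TRUE);
	ia64_add_pred (code, 0, 8, 9, 10);		/* (p0) add r8 = r9, r10 */
	ia64_stop (code);				/* stop bit after the last ins */
	ia64_codegen_close (code);			/* flush the partial bundle */
	return code.buf;				/* advanced past the emitted bundles */
}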
From f1bce593b3504a82fc344d696eeedd91c39bcfee Mon Sep 17 00:00:00 2001
From: Paolo Molaro
Date: Thu, 4 Aug 2005 18:51:34 +0000
Subject: Uncommitted fixes.

svn path=/trunk/mono/; revision=48015
---
 arm/arm-fpa-codegen.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arm/arm-fpa-codegen.h b/arm/arm-fpa-codegen.h
index 7db2764..37653b0 100644
--- a/arm/arm-fpa-codegen.h
+++ b/arm/arm-fpa-codegen.h
@@ -102,22 +102,22 @@ enum {
 #define ARM_LDFS_COND(p,freg,base,offset,cond) \
 	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
 #define ARM_LDFS(p,freg,base,offset) \
-	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+	ARM_LDFS_COND(p,freg,base,offset,ARMCOND_AL)

 #define ARM_LDFD_COND(p,freg,base,offset,cond) \
 	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
 #define ARM_LDFD(p,freg,base,offset) \
-	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+	ARM_LDFD_COND(p,freg,base,offset,ARMCOND_AL)

 #define ARM_STFS_COND(p,freg,base,offset,cond) \
 	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
 #define ARM_STFS(p,freg,base,offset) \
-	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
+	ARM_STFS_COND(p,freg,base,offset,ARMCOND_AL)

 #define ARM_STFD_COND(p,freg,base,offset,cond) \
 	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset)))
 #define ARM_STFD(p,freg,base,offset) \
-	ARM_EMIT((p), ARM_DEF_FPA_LDF_STF(ARMCOND_AL,1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset)))
+	ARM_STFD_COND(p,freg,base,offset,ARMCOND_AL)

 #define ARM_DEF_FPA_CPDO_MONADIC(cond,op,dreg,sreg,round,prec) \
 	(1 << 8) | (14 << 24) | \
@@ -175,7 +175,7 @@ enum {
 	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_SINGLE,ARM_FPA_ROUND_NEAREST))

 #define ARM_FIXZ(p,rd,fm) \
-	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FIX,0,(fm),(rd),0,ARM_FPA_ROUND_NEAREST))
+	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FIX,0,(fm),(rd),0,ARM_FPA_ROUND_ZERO))

 #define ARM_WFS(p,rd) \
 	ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST))
-- cgit v1.1

From d151f0e0b203a78ca99cab91d9df89ffe7728880 Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Wed, 17 Aug 2005 20:28:30 +0000
Subject: 2005-08-17 Zoltan Varga

	* ia64/ia64-codegen.h: Add dependency information for all instructions.

svn path=/trunk/mono/; revision=48476
---
 ChangeLog           |   4 +
 ia64/ia64-codegen.h | 474 ++++++++++++++++++++++++++++++++--------------------
 2 files changed, 299 insertions(+), 179 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 6ab3ec5..bb5b8c1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2005-08-17 Zoltan Varga
+
+	* ia64/ia64-codegen.h: Add dependency information for all instructions.
+
 2005-07-30 Zoltan Varga

 	* ia64/ia64-codegen.h: Ongoing IA64 work.
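The ia64-codegen.h diff below threads a side channel through every instruction macro: each operand is recorded in code.dep_info as a (tag, register) byte pair drawn from Ia64Dependency, with an IA64_END_OF_INS pair appended per instruction, so the bundler can tell where stop bits are needed. A sketch of a consumer of that encoding; this helper is illustrative only, since the real consumer is the scheduling logic inside ia64_emit_bundle, which this excerpt does not include:

/* Scan one instruction's (tag, reg) pairs, which run until an
 * IA64_END_OF_INS marker, and report a read-after-write hazard on a
 * general register written earlier in the same instruction group. */
static gboolean
needs_stop_bit (const guint8 *dep_info, int pos, int written_gr)
{
	while (dep_info [pos] != IA64_END_OF_INS) {
		int tag = dep_info [pos];
		int reg = dep_info [pos + 1];

		if (tag == IA64_READ_GR && reg == written_gr)
			return TRUE;	/* a stop bit is required before this ins */
		pos += 2;
	}
	return FALSE;
}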
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index be254c0..c6aeccd 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -117,7 +117,7 @@ typedef enum { /* disassembly */ #define ia64_bundle_template(code) ((*(guint64*)(gpointer)code) & 0x1f) #define ia64_bundle_ins1(code) (((*(guint64*)(gpointer)code) >> 5) & 0x1ffffffffff) -#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x3ffff) << 18)) +#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x7fffff) << 18)) #define ia64_bundle_ins3(code) ((((guint64*)(gpointer)code)[1]) >> 23) #define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37) @@ -131,7 +131,9 @@ typedef enum { #define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7) #define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1) #define ia64_ins_x2a(ins) ((((guint64)(ins)) >> 34) & 0x3) +#define ia64_ins_x2b(ins) ((((guint64)(ins)) >> 27) & 0x3) #define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7) +#define ia64_ins_x4(ins) ((((guint64)(ins)) >> 29) & 0xf) #define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f) #define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1) #define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1) @@ -139,6 +141,30 @@ typedef enum { #define IA64_NOP_I ((0x01 << 27)) #define IA64_NOP_M ((0x01 << 27)) +#define IA64_NOP_B (((long)0x02 << 37)) +#define IA64_NOP_F ((0x01 << 27)) +#define IA64_NOP_X ((0x01 << 27)) + +/* + * READ_PR_BRANCH and WRITE_PR_FLOAT are used to be able to place comparisons + * + branches in the same instruction group. + */ +typedef enum { + IA64_READ_GR, + IA64_WRITE_GR, + IA64_READ_PR, + IA64_WRITE_PR, + IA64_READ_PR_BRANCH, + IA64_WRITE_PR_FLOAT, + IA64_READ_BR, + IA64_WRITE_BR, + IA64_READ_FR, + IA64_WRITE_FR, + IA64_READ_AR, + IA64_WRITE_AR, + IA64_END_OF_INS, + IA64_NONE +} Ia64Dependency; /* * IA64 code cannot be emitted in the same way as code on other processors, @@ -147,13 +173,16 @@ typedef enum { * */ +#define IA64_INS_BUFFER_SIZE 4 + typedef struct { guint8 *buf; guint automatic : 1; guint one_ins_per_bundle : 1; - guint64 instructions [3]; - int itypes [3], stops [3]; - int nins, template; + guint64 instructions [IA64_INS_BUFFER_SIZE]; + int itypes [IA64_INS_BUFFER_SIZE], stops [IA64_INS_BUFFER_SIZE]; + guint8 dep_info [128]; + int nins, template, dep_info_pos; } Ia64CodegenState; #ifdef IA64_SIMPLE_EMIT_BUNDLE @@ -177,6 +206,7 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); code.nins = 0; \ code.automatic = 1; \ code.one_ins_per_bundle = 0; \ + code.dep_info_pos = 0; \ } while (0) #define ia64_codegen_close(code) do { \ @@ -217,10 +247,11 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); if (G_LIKELY (code.automatic)) { \ code.instructions [code.nins] = ins; \ code.itypes [code.nins] = itype; \ - code.stops [code.nins] = 1; \ code.nins ++; \ + code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \ + code.dep_info [code.dep_info_pos ++] = 0; \ EMIT_BUNDLE (itype, code); \ - if (code.nins == 3) \ + if (code.nins == IA64_INS_BUFFER_SIZE) \ ia64_emit_bundle (&code, FALSE); \ } else { \ g_assert (code.nins < 3); \ @@ -278,6 +309,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) } code->nins = 0; + code->dep_info_pos = 0; } #endif /* IA64_SIMPLE_EMIT_BUNDLE */ @@ -333,11 +365,82 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define check_sf(sf) check_assert ((sf) >= 0 && (sf) <= 3) -#define check_gregs(r1,r2,r3) do { check_greg ((r1)); 
check_greg ((r2)); check_greg ((r3)); } while (0) +#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0) -#define check_pregs(p1,p2) do { check_preg ((p1)); check_preg ((p2)); } while (0) +/* Dependency info */ +#define read_gr(code, gr) do { \ + check_greg ((gr)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_GR; \ + code.dep_info [code.dep_info_pos ++] = gr; \ +} while (0) -#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0) +#define write_gr(code, gr) do { \ + check_greg ((gr)); \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_GR; \ + code.dep_info [code.dep_info_pos ++] = gr; \ +} while (0) + +#define read_pr(code,pr) do { \ + if ((pr) != 0) { \ + check_preg ((pr)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_PR; \ + code.dep_info [code.dep_info_pos ++] = (pr); \ + } \ +} while (0) + +#define write_pr(code,pr) do { \ + if ((pr) != 0) { \ + check_preg ((pr)); \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR; \ + code.dep_info [code.dep_info_pos ++] = (pr); \ + } \ +} while (0) + +#define read_pr_branch(code,reg) do { \ + check_preg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_PR_BRANCH; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define write_pr_fp(code,reg) do { \ + check_preg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR_FLOAT; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define read_br(code,reg) do { \ + check_breg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_BR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define write_br(code,reg) do { \ + check_breg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_BR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define read_fr(code,reg) do { \ + check_freg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_FR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define write_fr(code,reg) do { \ + check_freg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_FR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define read_ar(code,reg) do { \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_AR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + +#define write_ar(code,reg) do { \ + code.dep_info [code.dep_info_pos ++] = IA64_WRITE_AR; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) #define ia64_emit_ins_1(code,itype,f1,o1) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)))) @@ -357,7 +460,11 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11)))) -#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) +/* + * A-Unit instructions + */ + +#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, 
(r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) #define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0) #define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1) @@ -369,12 +476,12 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2) #define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3) -#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) +#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0) #define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count)) #define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count)) -#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { check_greg ((r1)); check_greg ((r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0) +#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0) #define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1) #define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0) @@ -382,16 +489,16 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2) #define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3) -#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { check_greg ((r1)); check_greg ((r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0) +#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0) #define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0) #define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0) -#define ia64_a5(code, qp, r1, imm, r3) do { check_greg ((r1)); check_greg ((r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, 
(((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0) +#define ia64_a5(code, qp, r1, imm, r3) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0) -#define ia64_addl_imm_pred(code, qp,r1,imm22,r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3)) +#define ia64_addl_imm_pred(code, qp, r1, imm22, r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3)) -#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) +#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) #define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0) #define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0) @@ -420,23 +527,23 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 1) /* Pseudo ops */ -#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq ((code), (p2), (p1), (r2), (r3)) -#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt ((code), (p2), (p1), (r3), (r2)) -#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt ((code), (p1), (p2), (r3), (r2)) -#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt ((code), (p2), (p1), (r2), (r3)) -#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu ((code), (p2), (p1), (r3), (r2)) -#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu ((code), (p1), (p2), (r3), (r2)) -#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu ((code), (p2), (p1), (r2), (r3)) - -#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq ((code), (p2), (p1), (r2), (r3)) -#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt ((code), (p2), (p1), (r3), (r2)) -#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt ((code), (p1), (p2), (r3), (r2)) -#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt ((code), (p2), (p1), (r2), (r3)) -#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu ((code), (p2), (p1), (r3), (r2)) -#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu ((code), (p1), (p2), (r3), (r2)) -#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu ((code), (p2), (p1), (r2), (r3)) - -#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { check_greg ((r2)); check_greg ((r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) +#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), (qp), (p2), (p1), (r2), (r3)) 
+#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)

 #define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0)
 #define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0)
@@ -464,7 +571,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
 #define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1)
 #define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1)

-#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { check_greg ((r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)

 #define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0)
 #define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0)
@@ -509,7 +616,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
 #define ia64_cmp4_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
 #define ia64_cmp4_geu_imm_pred(code, qp, p1, p2, imm8, r3)
ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3)) -#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) +#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) #define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0) #define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 0) @@ -544,43 +651,47 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1) #define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1) -#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { check_gregs ((r1), (r2), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) +#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0) #define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, count); #define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, count); #define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 1 : (((count) == 15) ? 
2 : 3))) -#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { check_gregs ((r1), (r2), (r3)); check_assert (((ct2d) == 0) | ((ct2d) == 7) | ((ct2d) == 15) | ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +/* + * I-Unit Instructions + */ + +#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert (((ct2d) == 0) | ((ct2d) == 7) | ((ct2d) == 15) | ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count)); #define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count)); -#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) - -#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3) -#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3) -#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2) -#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2) -#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2) -#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2) -#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2) -#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2) -#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0) -#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0) -#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0) -#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1) -#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1) -#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1) -#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1) -#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1) -#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1) -#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0) -#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1) -#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0) -#define 
ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1) -#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2) +#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) + +#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3) +#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3) +#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2) +#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2) +#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2) +#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2) +#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2) +#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2) +#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0) +#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0) +#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0) +#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1) +#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1) +#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1) +#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1) +#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1) +#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1) +#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0) +#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1) +#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0) +#define ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1) +#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2) typedef enum { IA64_MUX1_BRCST = 0x0, @@ -590,15 +701,15 @@ typedef enum { IA64_MUX1_REV = 0xb } Ia64Mux1Permutation; -#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) +#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 
(r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) #define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2) -#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) +#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0) #define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2) -#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0) #define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0) @@ -607,54 +718,54 @@ typedef enum { #define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0) #define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0) -#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { check_greg ((r1)); check_greg ((r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0) #define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0) #define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0) #define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0) -#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); 
read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)

 #define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1)
 #define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1)
 #define ia64_shl_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1)

-#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), (r2), 0); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)

 #define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1)
 #define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1)

-#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)

 #define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2)

-#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { check_gregs ((r1), (r2), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0)
+#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0)

 #define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), (r3), (count), 5, 3, 0)

-#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)

 #define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0)
 #define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1)

-#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { ia64_emit_ins_8 ((code),
IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) +#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0) #define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), (r2), (pos), (len), 1, 1, 0) -#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1) -#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1) -#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0) +#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0) #define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len)) -#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) +#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) #define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0) #define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1) @@ -665,7 +776,7 @@ typedef enum { #define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0) #define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), 
(qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1) -#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { check_pregs ((p1), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) +#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0) #define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0) #define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1) @@ -676,20 +787,20 @@ typedef enum { #define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0) #define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1) -#define ia64_i18(code, qp, imm, x3, x6, y) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_i18(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0) #define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1) -#define ia64_i19(code, qp, imm, x3, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_i19(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0) -#define ia64_i20(code, qp, r2, imm, x3) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); check_imm21 ((imm)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_s_i_pred(code, qp,r2,disp) ia64_i20 ((code), (qp), (r2), (disp), 1) -#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { check_imm8 (tag13); check_gregs (0, (r2), 0); check_breg ((b1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0) +#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { read_pr ((code), (qp)); check_imm8 (tag13); write_br ((code), (b1)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0) typedef enum { IA64_MOV_TO_BR_WH_SPTK = 0, @@ -712,36 +823,36 @@ typedef enum { /* End of pseudo ops */ -#define ia64_i22(code, qp, r1, b2, x3, x6) do { check_gregs ((r1), 0, 0); check_breg ((b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, 
(qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i22(code, qp, r1, b2, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_br ((code), (b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_from_br_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31); -#define ia64_i23(code, qp, r2, mask, x3) do { check_greg ((r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0) +#define ia64_i23(code, qp, r2, mask, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0) #define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3) -#define ia64_i24(code, qp, imm, x3) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i24(code, qp, imm, x3) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2) -#define ia64_i25(code, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i25(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30) #define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33) -#define ia64_i26(code, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i26(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a) -#define ia64_i27(code, qp, ar3, imm, x3, x6) do { check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_i27(code, qp, ar3, imm, x3, x6) do { read_pr ((code), (qp)); write_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a) -#define ia64_i28(code, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) +#define ia64_i28(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0) #define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), 
(r1), (ar3), 0, 0x32)

-#define ia64_i29(code, qp, r1, r3, x3, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+#define ia64_i29(code, qp, r1, r3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)

 #define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10)
 #define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x11)
@@ -755,7 +866,7 @@ typedef enum {
 #define ia64_czx2_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1D)

 /*
- * M Instruction Type
+ * M-Unit Instructions
  */

 typedef enum {
@@ -769,7 +880,7 @@ typedef enum {
 	IA64_ST_HINT_NTA = 3
 } Ia64StoreHint;

-#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)

 #define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00)
 #define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01)
@@ -818,10 +929,11 @@ typedef enum {
 #define ia64_ld4_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2A)
 #define ia64_ld8_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2B)

-#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28)
+/* FIXME: This writes AR.CSD */
+#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28);
 #define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C)

-#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)

 #define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00)
 #define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01)
@@ -870,7 +982,7 @@ typedef enum {
 #define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A)
 #define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B)

-#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { check_gregs ((r1), 0, (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); }
while (0) +#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00) #define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01) @@ -1067,7 +1179,7 @@ typedef enum { /* End of pseudo ops */ -#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30) #define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31) @@ -1075,8 +1187,11 @@ typedef enum { #define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33) /* Pseudo ops */ + #define ia64_st8_pred(code, qp, r3, r2) ia64_st8_hint_pred ((code), (qp), (r3), (r2), 0) +/* End of pseudo ops */ + #define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34) #define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35) #define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36) @@ -1087,7 +1202,7 @@ typedef enum { #define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30) #define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34) -#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { check_gregs (0, (r2), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30) #define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31) @@ -1101,7 +1216,7 @@ typedef enum { #define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B) -#define ia64_m6(code, qp, f1, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m6(code, qp, 
f1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02) #define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03) @@ -1135,7 +1250,7 @@ typedef enum { #define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B) -#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02) #define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03) @@ -1169,7 +1284,7 @@ typedef enum { #define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B) -#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); check_freg ((f1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02) #define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03) @@ -1303,7 +1418,7 @@ typedef enum { /* End of pseudo ops */ -#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32) #define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33) @@ -1311,7 +1426,7 @@ typedef enum { #define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30) #define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B) -#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { check_greg ((r3)); check_freg 
((f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_fr ((code), (f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32) #define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33) @@ -1319,7 +1434,7 @@ typedef enum { #define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x30) #define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B) -#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02) #define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03) @@ -1345,7 +1460,7 @@ typedef enum { #define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x27) #define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25) -#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { check_greg ((r3)); check_freg ((f1)); check_freg ((f2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02) #define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03) @@ -1378,28 +1493,28 @@ typedef enum { IA64_LFHINT_NTA = 3 } Ia64LinePrefetchHint; -#define ia64_m13(code, qp, r3, hint, m, x, x6) do { check_greg ((r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m13(code, qp, r3, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_lfetch_hint_pred(code, qp, r3, hint) 
ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C) #define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D) #define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E) #define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F) -#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { check_greg ((r3)); check_greg ((r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C) #define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D) #define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E) #define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F) -#define ia64_m15(code, qp, r3, imm, hint, x6) do { check_greg ((r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) +#define ia64_m15(code, qp, r3, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0) #define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C) #define ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D) #define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E) #define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F) -#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { check_gregs ((r1), (r2), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00) #define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01) @@ -1418,46 +1533,46 @@ typedef enum { #define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3))) -#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm = (imm) < 0 ? 
- (imm) : (imm); check_gregs ((r1), 0, (r3)); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); int aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10_ns ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16) #define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17) -#define ia64_m18(code, qp, f1, r2, m, x, x6) do { check_greg ((r2)); check_freg ((f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0) +#define ia64_m18(code, qp, f1, r2, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_fr ((code), (f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0) #define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C) #define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D) #define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E) #define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F) -#define ia64_m19(code, qp, r1, f2, m, x, x6) do { check_greg ((r1)); check_freg ((f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m19(code, qp, r1, f2, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_getf_sig_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C) #define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D) #define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E) #define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F) -#define ia64_m20(code, qp, r2, imm, x3) do { check_greg ((r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) +#define ia64_m20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) #define ia64_chk_s_m_pred(code, qp,r2,disp) ia64_m20 ((code), (qp), (r2), (disp), 1) -#define ia64_m21(code, qp, f2, imm, x3) do { check_freg 
((f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) +#define ia64_m21(code, qp, f2, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0) #define ia64_chk_s_float_m_pred(code, qp,f2,disp) ia64_m21 ((code), (qp), (f2), (disp), 3) -#define ia64_m22(code, qp, r1, imm, x3) do { check_greg ((r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m22(code, qp, r1, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_a_nc_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 4) #define ia64_chk_a_clr_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 5) -#define ia64_m23(code, qp, f1, imm, x3) do { check_freg ((f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m23(code, qp, f1, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_chk_a_nc_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 6) #define ia64_chk_a_clr_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 7) -#define ia64_m24(code, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m24(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1) #define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2) @@ -1467,64 +1582,65 @@ typedef enum { #define ia64_stlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3) #define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3) -#define ia64_m25(code, qp, x3, x4, x2) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m25(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_flushrs_pred(code, qp) ia64_m24 ((code), (qp), 0, 0xC, 0) #define ia64_loadrs_pred(code, qp) ia64_m24 ((code), (qp), 0, 0XA, 0) -#define ia64_m26(code, qp, r1, x3, x4, x2) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) +#define ia64_m26(code, qp, r1, x3, x4, x2) do { read_pr ((code), (qp)); read_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1) -#define ia64_m27(code, qp, f1, x3, x4, x2) do { check_freg ((f1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 
31, (x3), 33, (0), 37); } while (0) +#define ia64_m27(code, qp, f1, x3, x4, x2) do { read_pr ((code), (qp)); read_fr ((code), (f1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0) #define ia64_invala_e_float_pred(code, qp, f1) ia64_m26 ((code), (qp), (f1), 0, 3, 1) -#define ia64_m28(code, qp, r3, x3, x6, x) do { check_greg ((r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0) +#define ia64_m28(code, qp, r3, x3, x6, x) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0) #define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0) #define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1) -#define ia64_m29(code, qp, ar3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m29(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a) -#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2) -#define ia64_m31(code, qp, r1, ar3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m31(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22) -#define ia64_m32(code, qp, cr3, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) + +#define ia64_m32(code, qp, cr3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), (cr3), (r2), 0, 0x2C) -#define ia64_m33(code, qp, r1, cr3, x3, x6) do { check_greg ((r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m33(code, qp, r1, cr3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24) -#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { 
check_greg ((r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); ia64_begin_bundle ((code)); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0) -#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0) +#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { read_pr ((code), (qp)); check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0) -#define ia64_m35(code, qp, r2, x3, x6) do { check_greg ((r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m35(code, qp, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D) #define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29) -#define ia64_m36(code, qp, r1, x3, x6) do { check_greg ((r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0) +#define ia64_m36(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0) #define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25) #define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21) -#define ia64_m37(code, qp, imm, x3, x2, x4) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m37(code, qp, imm, x3, x2, x4) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0) /* The System/Memory Management instruction encodings (M38-M47) are missing */ -#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) +#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0) #define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0) #define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1) @@ 
-1546,29 +1662,29 @@ typedef enum { IA64_DH_CLR = 1 } Ia64BranchCacheDeallocHint; -#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) +#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { read_pr_branch ((code), (qp)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) #define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0) #define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2) #define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3) -#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) +#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0) #define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5) #define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6) #define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7) -#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); check_breg ((b1)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) #define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) -#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) +#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) #define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0) #define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), 
(b1), (bwh), (ph), (dh), 0x20, 1) #define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4) -#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { check_breg ((b1)); check_breg ((b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) +#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) #define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) @@ -1601,7 +1717,7 @@ typedef enum { /* B6 and B7 is missing */ -#define ia64_b8(code, qp, x6) do { ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0) +#define ia64_b8(code, qp, x6) do { read_pr ((code), (qp)); ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0) #define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02) #define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04) @@ -1611,17 +1727,17 @@ typedef enum { #define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D) #define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10) -#define ia64_b9(code, qp, imm, opcode, x6) do { check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) +#define ia64_b9(code, qp, imm, opcode, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) #define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00) #define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00) #define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01) /* - * FLOATING POINT + * F-Unit Instructions */ -#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0) +#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); read_fr ((code), (f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0) #define ia64_fma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 0) #define ia64_fma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 1) @@ -1640,17 +1756,17 @@ typedef enum { #define ia64_fnorm_s_sf_pred(code, qp, f1, f3, sf) ia64_fma_s_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf)) #define ia64_fnorm_d_sf_pred(code, qp, f1, f3, sf) ia64_fma_d_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf)) -#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 
37); } while (0) +#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0) #define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0) #define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3) #define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2) -#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_fr ((f4)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0) +#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0) #define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0) -#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { check_fr ((f2)); check_fr ((f3)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0) +#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { read_pr ((code), (qp)); read_fr ((code), (f2)); read_fr ((code), (f3)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0) #define ia64_fcmp_eq_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 0) #define ia64_fcmp_lt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 0) @@ -1671,22 +1787,22 @@ typedef enum { #define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf)) #define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf)) -#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { check_fr ((f2)); check_preg ((p1)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0) +#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { read_pr ((code), (qp)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0) #define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0) #define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1) -#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); check_preg 
((p2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) +#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) #define ia64_frcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 0, 1, 0) #define ia64_fprcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 1, 1, 0) -#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { check_fr ((f1)); check_fr ((f3)); check_preg ((p2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) +#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0) #define ia64_frsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 0, 1, 1) #define ia64_fprsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 1, 1, 1) -#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) +#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) #define ia64_fmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x14) #define ia64_fman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x15) @@ -1705,7 +1821,7 @@ typedef enum { #define ia64_fpcmp_nle_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x36) #define ia64_fpcmp_ord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x37) -#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { check_fr ((f1)); check_fr ((f2)); check_fr ((f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0) +#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0) #define ia64_fmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10) #define ia64_fmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11) @@ -1730,7 +1846,7 @@ typedef enum { /* Pseudo ops */ #define ia64_fmov_pred(code, qp, f1, f3) ia64_fmerge_s_pred ((code), (qp), (f1), (f3), (f3)) -#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { check_sf ((sf)); check_fr ((f1)); check_fr ((f2)); 
ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) +#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) #define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18) #define ia64_fcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x19) @@ -1741,23 +1857,23 @@ typedef enum { #define ia64_fpcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1A) #define ia64_fpcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1B) -#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { check_fr ((f1)); check_fr ((f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } while (0) +#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } while (0) #define ia64_fcvt_xf_pred(code, qp, f1, f2) ia64_f11 ((code), (qp), (f1), (f2), 0, 0, 0x1C) -#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) +#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) #define ia64_fsetc_sf_pred(code, qp, amask, omask, sf) ia64_f12 ((code), (qp), (amask), (omask), (sf), 0, 0, 0x04) -#define ia64_f13(code, qp, sf, opcode, x, x6) do { ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) +#define ia64_f13(code, qp, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0) #define ia64_fclrf_sf_pred(code, qp, sf) ia64_f13 ((code), (qp), (sf), 0, 0, 0x05) -#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) +#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0) #define ia64_fchkf_sf_pred(code, qp, disp, sf) ia64_f14 ((code), (qp), (disp), (sf), 0, 0, 0x8) -#define ia64_f15(code, qp, imm, opcode, x, x6) do { check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) +#define ia64_f15(code, qp, imm, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0) #define ia64_break_f_pred(code, qp, imm) ia64_f15 ((code), (qp), (imm), 0, 0, 0x0) @@ -1765,23 +1881,23 @@ typedef 
enum { * X-UNIT ENCODINGS */ -#define ia64_x1(code, qp, imm, x3, x6) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_x1(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { check_greg ((r1)); if (code.automatic) ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic) ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0) +#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0) #define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0) -#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { check_breg ((b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0) +#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0) #define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 
((code), (qp), (b1), (disp), (bwh), (ph), (dh)) -#define ia64_x5(code, qp, imm, x3, x6, y) do { check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) +#define ia64_x5(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0) #define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0) #define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1) @@ -1980,28 +2096,28 @@ typedef enum { #define ia64_pmpyshr2_u(code, r1, r2, r3, count) ia64_pmpyshr2_u_pred ((code), 0, r1, r2, r3, count) -#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3) -#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3) -#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3) -#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3) -#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3) -#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3) -#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3) -#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3) -#define ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3) -#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3) -#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3) -#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3) -#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3) -#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3) -#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3) -#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3) -#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3) -#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3) -#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3) -#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3) -#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3) -#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3) +#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3) +#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3) +#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3) +#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3) +#define 
ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3) +#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3) +#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3) +#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3) +#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3) +#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3) +#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3) +#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3) +#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3) +#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3) +#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3) +#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3) #define ia64_mux1(code, r1, r2, mbtype) ia64_mux1_pred ((code), 0, r1, r2, mbtype) -- cgit v1.1 From cac0da0afb2a782de1db55a000a2125531e757fd Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 20 Aug 2005 22:16:11 +0000 Subject: 2005-08-21 Zoltan Varga * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs. svn path=/trunk/mono/; revision=48614 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index bb5b8c1..8ba7b70 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-08-21 Zoltan Varga + + * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs. + 2005-08-17 Zoltan Varga * ia64/ia64-codegen.h: Add dependency information for all instructions. 
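Note on the dependency annotations threaded through the encoding macros above: each read_* / write_* invocation appends an (access-kind, register) pair to a side buffer (code.dep_info) as the instruction is encoded, and the bundler later consults that buffer to decide where instruction-group stops must be placed. What follows is a minimal, self-contained sketch of that bookkeeping idea only; the names used here (DepKind, Dep, record_dep, needs_stop, MAX_DEPS) are illustrative stand-ins, not the actual Ia64CodegenState fields or the IA64_READ_* / IA64_WRITE_* constants defined in ia64-codegen.h.

/* Sketch: record (kind, reg) pairs for the current instruction group
 * and flag RAW/WAW hazards that would require a stop bit.  Assumed
 * simplification: general registers only, no overflow handling. */
#include <stdio.h>

typedef enum { READ_GR, WRITE_GR } DepKind;

typedef struct { DepKind kind; int reg; } Dep;

#define MAX_DEPS 64

static Dep dep_info [MAX_DEPS];
static int dep_info_pos = 0;

static void
record_dep (DepKind kind, int reg)
{
	/* corresponds to what a read_gr ()/write_gr () macro would log */
	dep_info [dep_info_pos].kind = kind;
	dep_info [dep_info_pos].reg = reg;
	dep_info_pos ++;
}

/* Returns 1 if the new instruction touches a register already written
 * inside the current group (read-after-write or write-after-write). */
static int
needs_stop (const Dep *ins_deps, int n)
{
	int i, j;

	for (i = 0; i < n; ++i)
		for (j = 0; j < dep_info_pos; ++j)
			if (dep_info [j].kind == WRITE_GR && dep_info [j].reg == ins_deps [i].reg)
				return 1;
	return 0;
}

int
main (void)
{
	/* second instruction: "add r7 = r4, r8" (writes r7, reads r4, r8) */
	Dep next [3] = { { WRITE_GR, 7 }, { READ_GR, 4 }, { READ_GR, 8 } };

	/* first instruction: "add r4 = r5, r6" (writes r4, reads r5, r6) */
	record_dep (WRITE_GR, 4);
	record_dep (READ_GR, 5);
	record_dep (READ_GR, 6);

	/* prints 1: the read of r4 conflicts with the earlier write */
	printf ("stop needed: %d\n", needs_stop (next, 3));
	return 0;
}

Since IA-64 forbids read-after-write and write-after-write within a single instruction group, a conflict like the one detected above forces a stop bit between the two instructions; the "ins scheduling" improvements in the patch below concern exactly this kind of placement decision.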
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index c6aeccd..4d5e5cc 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -158,6 +158,7 @@ typedef enum { IA64_WRITE_PR_FLOAT, IA64_READ_BR, IA64_WRITE_BR, + IA64_READ_BR_BRANCH, IA64_READ_FR, IA64_WRITE_FR, IA64_READ_AR, @@ -420,6 +421,12 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) code.dep_info [code.dep_info_pos ++] = (reg); \ } while (0) +#define read_br_branch(code,reg) do { \ + check_breg ((reg)); \ + code.dep_info [code.dep_info_pos ++] = IA64_READ_BR_BRANCH; \ + code.dep_info [code.dep_info_pos ++] = (reg); \ +} while (0) + #define read_fr(code,reg) do { \ check_freg ((reg)); \ code.dep_info [code.dep_info_pos ++] = IA64_READ_FR; \ @@ -725,7 +732,7 @@ typedef enum { #define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0) #define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0) -#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1) #define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1) @@ -736,7 +743,7 @@ typedef enum { #define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1) #define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1) -#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) +#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0) #define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2) @@ -1674,7 +1681,7 @@ typedef enum { #define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6) #define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7) -#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); } while (0) +#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); 
check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); ia64_begin_bundle ((code)); } while (0) #define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) @@ -1684,7 +1691,7 @@ typedef enum { #define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1) #define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4) -#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); } while (0) +#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); ia64_begin_bundle ((code)); } while (0) #define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh)) @@ -1885,7 +1892,7 @@ typedef enum { #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic) ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic && (code.nins > IA64_INS_BUFFER_SIZE - 2)) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -- cgit v1.1 From d4b1ea47e0395555276e1a6c8ddfa3800692b6ea Mon Sep 17 00:00:00 2001 From: Wade Berrier Date: Fri, 26 Aug 2005 06:48:41 +0000 Subject: Include files for 'make dist' svn path=/trunk/mono/; revision=48871 --- ia64/Makefile.am | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ia64/Makefile.am b/ia64/Makefile.am index e69de29..9fd2fe3 100644 --- a/ia64/Makefile.am +++ b/ia64/Makefile.am @@ -0,0 +1,7 @@ +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-ia64.la + +libmonoarch_ia64_la_SOURCES = tramp.c ia64-codegen.h + + -- cgit v1.1 From 16291812e22e9750bf101e297fc573ce35bab382 Mon Sep 17 00:00:00 2001 From: Wade Berrier Date: Fri, 26 Aug 2005 06:58:33 +0000 Subject: Oops svn path=/trunk/mono/; revision=48874 --- ia64/Makefile.am | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ia64/Makefile.am b/ia64/Makefile.am index 
9fd2fe3..139597f 100644 --- a/ia64/Makefile.am +++ b/ia64/Makefile.am @@ -1,7 +1,2 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-ia64.la - -libmonoarch_ia64_la_SOURCES = tramp.c ia64-codegen.h -- cgit v1.1 From 9a52b3ea85b1899c6cc23263eec6879841b3fd08 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 26 Aug 2005 13:34:24 +0000 Subject: 2005-08-26 Zoltan Varga * ia64/Makefile.am: Distribute ia64-codegen.h. svn path=/trunk/mono/; revision=48891 --- ChangeLog | 4 ++++ ia64/Makefile.am | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8ba7b70..c315eb7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-08-26 Zoltan Varga + + * ia64/Makefile.am: Distribute ia64-codegen.h. + 2005-08-21 Zoltan Varga * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs. diff --git a/ia64/Makefile.am b/ia64/Makefile.am index 139597f..e03ea47 100644 --- a/ia64/Makefile.am +++ b/ia64/Makefile.am @@ -1,2 +1,3 @@ +EXTRA_DIST = ia64-codegen.h -- cgit v1.1 From 4e89407a4a8dc38125a804df930515a31603cdca Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 27 Aug 2005 14:33:09 +0000 Subject: 2005-08-27 Zoltan Varga * ia64/ia64-codegen.h: Fix some bugs. * ia64/codegen.c: Update to work with latest ia64-codegen.h svn path=/trunk/mono/; revision=48969 --- ChangeLog | 6 ++++++ ia64/codegen.c | 8 +++++++- ia64/ia64-codegen.h | 6 +++--- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index c315eb7..43647c1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2005-08-27 Zoltan Varga + + * ia64/ia64-codegen.h: Fix some bugs. + + * ia64/codegen.c: Update to work with latest ia64-codegen.h + 2005-08-26 Zoltan Varga * ia64/Makefile.am: Distribute ia64-codegen.h. diff --git a/ia64/codegen.c b/ia64/codegen.c index 66398e1..97e1aef 100644 --- a/ia64/codegen.c +++ b/ia64/codegen.c @@ -4,6 +4,10 @@ #include #include +#include + +#define IA64_SIMPLE_EMIT_BUNDLE + #include void @@ -817,7 +821,9 @@ main () ia64_fchkf_sf_pred ((code), 1, -1, 3); - ia64_break_f_pred ((code), 1, 0x123456); + ia64_break_f_pred ((code), 1, 0x1234); + + ia64_movl (code, 31, -123456); ia64_codegen_close (code); diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 4d5e5cc..6c84d13 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -550,7 +550,7 @@ ia64_emit_bundle (Ia64CodegenState *code, gboolean flush) #define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2)) #define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3)) -#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code, (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) +#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0) #define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0) #define ia64_cmp_gt_or_pred(code, qp, p1, 
p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0) @@ -1540,7 +1540,7 @@ typedef enum { #define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3))) -#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); int aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10_ns ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); int aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) @@ -1892,7 +1892,7 @@ typedef enum { #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic && (code.nins > IA64_INS_BUFFER_SIZE - 2)) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (guint64)(imm) & 0x7f, (13), (vc), 20, ((guint64)(imm) >> 21) & 0x1, 21, ((guint64)(imm) >> 16) & 0x1f, 22, ((guint64)(imm) >> 7) & 0x1ff, 27, ((guint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic && (code.nins > IA64_INS_BUFFER_SIZE - 2)) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -- cgit v1.1 From 8b07d9836f60fee4ff83a14ce110921be8ef8f2e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 3 Sep 2005 22:06:10 +0000 Subject: 2005-09-04 Zoltan Varga * ia64/ia64-codegen.h (ia64_no_stop): New macro. svn path=/trunk/mono/; revision=49399 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index 43647c1..62a8105 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-09-04 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_no_stop): New macro. + 2005-08-27 Zoltan Varga * ia64/ia64-codegen.h: Fix some bugs. 
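The ia64_no_stop macro introduced in the diff below only records a marker in the dep_info stream; the following is my guess at the intended usage, inferred from that scheme rather than shown by the patch (register numbers and immediates are made up):

    /* hypothetical: hint that no stop bit is needed between these two instructions */
    ia64_movl (code, 8, 0x1234);
    ia64_no_stop (code);
    ia64_movl (code, 9, 0x5678);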
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 6c84d13..558e052 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -163,6 +163,7 @@ typedef enum { IA64_WRITE_FR, IA64_READ_AR, IA64_WRITE_AR, + IA64_NO_STOP, IA64_END_OF_INS, IA64_NONE } Ia64Dependency; @@ -262,6 +263,11 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); } \ } while (0) +#define ia64_no_stop(code) do { \ + code.dep_info [code.dep_info_pos ++] = IA64_NO_STOP; \ + code.dep_info [code.dep_info_pos ++] = 0; \ +} while (0) + #if G_BYTE_ORDER != G_LITTLE_ENDIAN #error "FIXME" #endif -- cgit v1.1 From efbd8e41cf3337d59812a7cca48df3caee116b07 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 10 Sep 2005 20:50:37 +0000 Subject: 2005-09-10 Zoltan Varga * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions. Integrate emission of unwind directives into the assembly macros. svn path=/trunk/mono/; revision=49875 --- ChangeLog | 5 ++++ ia64/ia64-codegen.h | 76 +++++++++++++++++++++++++---------------------- 2 files changed, 41 insertions(+), 40 deletions(-) diff --git a/ChangeLog b/ChangeLog index 62a8105..d844653 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2005-09-10 Zoltan Varga + + * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions. + Integrate emission of unwind directives into the assembly macros. + 2005-09-04 Zoltan Varga * ia64/ia64-codegen.h (ia64_no_stop): New macro. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 558e052..11c0e81 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -11,6 +11,10 @@ #define _IA64_CODEGEN_H_ #include <glib.h> +#include <string.h> + +#define UNW_LOCAL_ONLY +#include <libunwind.h> typedef enum { IA64_INS_TYPE_A, @@ -176,15 +180,19 @@ typedef enum { */ #define IA64_INS_BUFFER_SIZE 4 +#define MAX_UNW_OPS 8 typedef struct { guint8 *buf; - guint automatic : 1; guint one_ins_per_bundle : 1; + int nins, template, dep_info_pos, unw_op_pos, unw_op_count; guint64 instructions [IA64_INS_BUFFER_SIZE]; - int itypes [IA64_INS_BUFFER_SIZE], stops [IA64_INS_BUFFER_SIZE]; + int itypes [IA64_INS_BUFFER_SIZE]; + guint8 *region_start; guint8 dep_info [128]; - int nins, template, dep_info_pos; + unw_dyn_op_t unw_ops [MAX_UNW_OPS]; + /* The index of the instruction to which the given unw op belongs */ + guint8 unw_ops_pos [MAX_UNW_OPS]; } Ia64CodegenState; #ifdef IA64_SIMPLE_EMIT_BUNDLE @@ -193,22 +201,14 @@ G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flu void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); #endif -/* - * There are two code generation modes: - * - in automatic mode, bundling and stops are handled automatically by the - * code generation macros. - * FIXME: In order to simplify things, we emit a stop after every instruction for - * now. - * - in non-automatic mode, the caller is responsible for handling bundling and - * stops using the appropriate macros.
- */ - #define ia64_codegen_init(code, codegen_buf) do { \ code.buf = codegen_buf; \ + code.region_start = code.buf; \ code.nins = 0; \ - code.automatic = 1; \ code.one_ins_per_bundle = 0; \ code.dep_info_pos = 0; \ + code.unw_op_count = 0; \ + code.unw_op_pos = 0; \ } while (0) #define ia64_codegen_close(code) do { \ @@ -219,25 +219,28 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); ia64_emit_bundle (&code, TRUE); \ } while (0) -#define ia64_codegen_set_automatic(code, is_automatic) do { \ - code.automatic = (is_automatic); \ -} while (0) - #define ia64_codegen_set_one_ins_per_bundle(code, is_one) do { \ ia64_begin_bundle (code); \ code.one_ins_per_bundle = (is_one); \ } while (0) -#define ia64_stop(code) do { \ - g_assert ((code.nins > 0)); \ - code.stops [code.nins - 1] = 1; \ -} while (0) - #define ia64_begin_bundle_template(code, bundle_template) do { \ ia64_emit_bundle (&code, TRUE); \ code.template = (bundle_template); \ } while (0) +#define ia64_unw_save_reg(code, reg, dreg) do { \ + g_assert (code.unw_op_count <= MAX_UNW_OPS); \ + code.unw_ops_pos [code.unw_op_count] = code.nins; \ + _U_dyn_op_save_reg (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, -1, reg, dreg); \ +} while (0) + +#define ia64_unw_add(code, reg, val) do { \ + g_assert (code.unw_op_count <= MAX_UNW_OPS); \ + code.unw_ops_pos [code.unw_op_count] = code.nins; \ + _U_dyn_op_add (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, reg, val); \ +} while (0) + #if 0 /* To ease debugging, emit instructions immediately */ #define EMIT_BUNDLE(itype, code) ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); @@ -246,21 +249,14 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); #endif #define ia64_emit_ins(code, itype, ins) do { \ - if (G_LIKELY (code.automatic)) { \ - code.instructions [code.nins] = ins; \ - code.itypes [code.nins] = itype; \ - code.nins ++; \ - code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \ - code.dep_info [code.dep_info_pos ++] = 0; \ - EMIT_BUNDLE (itype, code); \ - if (code.nins == IA64_INS_BUFFER_SIZE) \ - ia64_emit_bundle (&code, FALSE); \ - } else { \ - g_assert (code.nins < 3); \ - code.instructions [code.nins] = ins; \ - code.itypes [code.nins] = itype; \ - code.nins ++; \ - } \ + code.instructions [code.nins] = ins; \ + code.itypes [code.nins] = itype; \ + code.nins ++; \ + code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \ + code.dep_info [code.dep_info_pos ++] = 0; \ + EMIT_BUNDLE (itype, code); \ + if (code.nins == IA64_INS_BUFFER_SIZE) \ + ia64_emit_bundle (&code, FALSE); \ } while (0) #define ia64_no_stop(code) do { \ @@ -1691,7 +1687,7 @@ typedef enum { #define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh)) -#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) +#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0) #define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0) #define 
ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1) @@ -1898,7 +1894,7 @@ typedef enum { #define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00) -#define ia64_x2(code, qp, r1, imm, vc) do { if (code.automatic && (code.nins > IA64_INS_BUFFER_SIZE - 2)) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) +#define ia64_x2(code, qp, r1, imm, vc) do { if (code.nins > IA64_INS_BUFFER_SIZE - 2) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0) #define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0) -- cgit v1.1 From 541c387c65579ca75abe8cdb9d0725c1e6d90df1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 11 Sep 2005 16:55:41 +0000 Subject: 2005-09-11 Zoltan Varga * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro. svn path=/trunk/mono/; revision=49910 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index d844653..f6433ce 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-09-11 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro. + 2005-09-10 Zoltan Varga * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions. diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 11c0e81..185228f 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -115,6 +115,7 @@ typedef enum { } Ia64BranchRegister; typedef enum { + IA64_CCV = 32, IA64_PFS = 64 } Ia64ApplicationRegister; @@ -241,6 +242,12 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); _U_dyn_op_add (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, reg, val); \ } while (0) +#define ia64_unw_pop_frames(code, nframes) do { \ + g_assert (code.unw_op_count <= MAX_UNW_OPS); \ + code.unw_ops_pos [code.unw_op_count] = code.nins; \ + _U_dyn_op_pop_frames (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, (nframes)); \ +} while (0) + #if 0 /* To ease debugging, emit instructions immediately */ #define EMIT_BUNDLE(itype, code) ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); -- cgit v1.1 From 7c363c19299d3f85ee7de0eec2a83108ea98eff2 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 26 Sep 2005 08:58:47 +0000 Subject: Compilation fix. 
svn path=/trunk/mono/; revision=50748 --- s390/tramp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/s390/tramp.c b/s390/tramp.c index 1483cfd..bac00fc 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -1117,6 +1117,7 @@ DEBUG(printf("Returns: %d\n",sig->ret->type)); /*-------------------------------------------------*/ /* stackval_to_data has placed data in result area */ /*-------------------------------------------------*/ + break; } break; default: -- cgit v1.1 From 64dbeb6e048aa9654800624a74e9c58065cf01ea Mon Sep 17 00:00:00 2001 From: Raja R Harinath Date: Tue, 27 Sep 2005 09:09:41 +0000 Subject: * arm/dpiops.sh, arm/fpaops.h: Output to stdout. * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix for srcdir != builddir. svn path=/trunk/mono/; revision=50833 --- ChangeLog | 6 ++++++ arm/Makefile.am | 8 ++++---- arm/dpiops.sh | 16 +++++++--------- arm/fpaops.sh | 12 +++++------- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/ChangeLog b/ChangeLog index f6433ce..2c3c778 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2005-09-27 Raja R Harinath + + * arm/dpiops.sh, arm/fpaops.h: Output to stdout. + * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix + for srcdir != builddir. + 2005-09-11 Zoltan Varga * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro. diff --git a/arm/Makefile.am b/arm/Makefile.am index f9a80d4..38ab336 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -14,13 +14,13 @@ libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ arm-dis.h arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th - bash $(srcdir)/dpiops.sh + (cd $(srcdir); bash ./dpiops.sh) > $@t + mv $@t $@ arm_fpamacros.h: fpaops.sh fpam_macros.th fpa_macros.th - bash $(srcdir)/fpaops.sh + (cd $(srcdir); bash ./fpaops.sh) > $@t + mv $@t $@ CLEANFILES = $(BUILT_SOURCES) EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th fpam_macros.th fpa_macros.th arm-fpa-codegen.h fpaops.sh - - diff --git a/arm/dpiops.sh b/arm/dpiops.sh index ad394ae..1802eec 100755 --- a/arm/dpiops.sh +++ b/arm/dpiops.sh @@ -3,30 +3,28 @@ OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC" CMP_OPCODES="TST TEQ CMP CMN" MOV_OPCODES="MOV MVN" -OUTFILE=arm_dpimacros.h # $1: opcode list # $2: template function gen() { for i in $1; do - sed "s//$i/g" $2.th >> $OUTFILE + sed "s//$i/g" $2.th done } -echo -e "/* Macros for DPI ops, auto-generated from template */\n" > $OUTFILE +echo -e "/* Macros for DPI ops, auto-generated from template */\n" -echo -e "\n/* mov/mvn */\n" >> $OUTFILE +echo -e "\n/* mov/mvn */\n" gen "$MOV_OPCODES" mov_macros -echo -e "\n/* DPIs, arithmetic and logical */\n" >> $OUTFILE +echo -e "\n/* DPIs, arithmetic and logical */\n" gen "$OPCODES" dpi_macros -echo -e "\n\n" >> $OUTFILE +echo -e "\n\n" -echo -e "\n/* DPIs, comparison */\n" >> $OUTFILE +echo -e "\n/* DPIs, comparison */\n" gen "$CMP_OPCODES" cmp_macros -echo -e "\n/* end generated */\n" >> $OUTFILE - +echo -e "\n/* end generated */\n" diff --git a/arm/fpaops.sh b/arm/fpaops.sh index 108e2bc..416b894 100755 --- a/arm/fpaops.sh +++ b/arm/fpaops.sh @@ -2,25 +2,23 @@ DYADIC="ADF MUF SUF RSF DVF RDF POW RPW RMF FML FDV FRD POL" MONADIC="MVF MNF ABS RND SQT LOG EXP SIN COS TAN ASN ACS ATN URD NRM" -OUTFILE=arm_fpamacros.h # $1: opcode list # $2: template function gen() { for i in $1; do - sed "s//$i/g" $2.th >> $OUTFILE + sed "s//$i/g" $2.th done } -echo -e "/* Macros for FPA ops, auto-generated from template */\n" > $OUTFILE +echo -e "/* Macros for FPA ops, auto-generated from template 
*/\n" -echo -e "\n/* dyadic */\n" >> $OUTFILE +echo -e "\n/* dyadic */\n" gen "$DYADIC" fpa_macros -echo -e "\n/* monadic */\n" >> $OUTFILE +echo -e "\n/* monadic */\n" gen "$MONADIC" fpam_macros echo -e "\n\n" >> $OUTFILE -echo -e "\n/* end generated */\n" >> $OUTFILE - +echo -e "\n/* end generated */\n" -- cgit v1.1 From 749c9989f64683d8363481304647924ec1d910af Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 27 Sep 2005 13:25:16 +0000 Subject: Another compilation fix. svn path=/trunk/mono/; revision=50857 --- s390/tramp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/s390/tramp.c b/s390/tramp.c index bac00fc..f2f4b1b 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -649,6 +649,7 @@ printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); /* The callee has already placed the result */ /* in the required area */ /*------------------------------------------*/ + break; } break; case MONO_TYPE_VOID: -- cgit v1.1 From 2bba48015b516fd326cd082eb85325aa5b7676bf Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Fri, 7 Oct 2005 20:36:01 +0000 Subject: Patch incorporated from SUSE, Neale reviewed it svn path=/trunk/mono/; revision=51434 --- s390/tramp.c | 4 ++-- s390x/tramp.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/s390/tramp.c b/s390/tramp.c index f2f4b1b..5499161 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -644,7 +644,7 @@ printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); case 8: s390_stm (p, s390_r2, s390_r3, s390_r8, 0); break; - default: + default: ; /*------------------------------------------*/ /* The callee has already placed the result */ /* in the required area */ @@ -1114,7 +1114,7 @@ DEBUG(printf("Returns: %d\n",sig->ret->type)); case 8: s390_lm (p, s390_r2, s390_r3, s390_r10, 0); break; - default: + default: ; /*-------------------------------------------------*/ /* stackval_to_data has placed data in result area */ /*-------------------------------------------------*/ diff --git a/s390x/tramp.c b/s390x/tramp.c index 37b8de5..43306fb 100644 --- a/s390x/tramp.c +++ b/s390x/tramp.c @@ -644,7 +644,7 @@ printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke); case 8: s390_stg (p, s390_r2, 0, s390_r8, 0); break; - default: + default: ; /*------------------------------------------*/ /* The callee has already placed the result */ /* in the required area */ @@ -1110,7 +1110,7 @@ mono_arch_create_method_pointer (MonoMethod *method) case 8: s390_lg (p, s390_r2, 0, s390_r10, 0); break; - default: + default: ; /*-------------------------------------------------*/ /* stackval_to_data has placed data in result area */ /*-------------------------------------------------*/ -- cgit v1.1 From 0b2d13a625bfd03f8d24538ef48870daed540ee3 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Fri, 7 Oct 2005 21:25:31 +0000 Subject: Patch incorporated from SUSE, Neale reviewed it svn path=/trunk/mono/; revision=51443 --- s390/s390-codegen.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index 38c7cbe..e7994f8 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -261,14 +261,14 @@ typedef struct { char op2; } RXF_Format; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char r1 : 4; char x2 : 4; char b2 : 4; int d2 : 20; char op2; -} RXY_Format __attribute__ ((packed)); +} RXY_Format; typedef struct { char op; @@ -294,23 +294,23 @@ typedef struct { int d2 : 12; } 
RS_Format_3; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char r1 : 4; char r3 : 4; char b2 : 4; int d2 : 20; char op2; -} RSY_Format_1 __attribute__ ((packed)); +} RSY_Format_1; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char r1 : 4; char m3 : 4; char b2 : 4; int d2 : 20; char op2; -} RSY_Format_2 __attribute__ ((packed)); +} RSY_Format_2; typedef struct { char op1; @@ -345,19 +345,19 @@ typedef struct { char op2; } RIE_Format; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char r1 : 4; char op2 : 4; int i2; -} RIL_Format_1 __attribute__ ((packed)); +} RIL_Format_1; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char m1 : 4; char op2 : 4; int i2; -} RIL_Format_2 __attribute__ ((packed)); +} RIL_Format_2; typedef struct { char op; @@ -366,13 +366,13 @@ typedef struct { short d1 : 12; } SI_Format; -typedef struct { +typedef struct __attribute__ ((packed)) { char op1; char i2; char b1 : 4; int d1 : 20; char op2; -} SIY_Format __attribute__ ((packed)); +} SIY_Format; typedef struct { short op; @@ -419,13 +419,13 @@ typedef struct { short d4 : 12; } SS_Format_4; -typedef struct { +typedef struct __attribute__ ((packed)) { short op; char b1 : 4; short d1 : 12; char b2 : 4; short d2 : 12; -} SSE_Format __attribute__ ((packed)); +} SSE_Format; #define s390_emit16(c, x) do \ { \ -- cgit v1.1 From bb6893fc1e1854a8c9f848dfbfbc2dd00bde8735 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 16 Oct 2005 15:21:39 +0000 Subject: 2005-10-16 Zoltan Varga * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp. svn path=/trunk/mono/; revision=51764 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 2c3c778..49e0daf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-10-16 Zoltan Varga + + * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp. + 2005-09-27 Raja R Harinath * arm/dpiops.sh, arm/fpaops.h: Output to stdout. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 88ee5b5..261d3b2 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -71,7 +71,7 @@ typedef enum #define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9)) -#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15)) +#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP)) -- cgit v1.1 From ... Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 30 Oct 2005 18:06:59 +0000 Subject: 2005-10-30 Zoltan Varga * ia64/ia64-codegen.h (ia64_m17): Fix a warning. svn path=/trunk/mono/; revision=52399 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 49e0daf..79fc9c3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-10-30 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_m17): Fix a warning. + 2005-10-16 Zoltan Varga + + * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp.
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 185228f..d3b8aae 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -248,6 +248,19 @@ void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush); _U_dyn_op_pop_frames (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, (nframes)); \ } while (0) +#define ia64_unw_label_state(code, id) do { \ + g_assert (code.unw_op_count <= MAX_UNW_OPS); \ + code.unw_ops_pos [code.unw_op_count] = code.nins; \ + _U_dyn_op_label_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \ +} while (0) + + +#define ia64_unw_copy_state(code, id) do { \ + g_assert (code.unw_op_count <= MAX_UNW_OPS); \ + code.unw_ops_pos [code.unw_op_count] = code.nins; \ + _U_dyn_op_copy_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \ +} while (0) + #if 0 /* To ease debugging, emit instructions immediately */ #define EMIT_BUNDLE(itype, code) ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE); @@ -1549,7 +1562,7 @@ typedef enum { #define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3))) -#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); int aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) +#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm; read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) #define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) -- cgit v1.1 From f5fc186c01c764705e303b3783bf06e507e54640 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 13 Dec 2005 13:57:51 +0000 Subject: Avoid lvalue pointer casts. 
svn path=/trunk/mono/; revision=54279 --- arm/arm-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 1d0e6e2..5de57b2 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -31,7 +31,7 @@ arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32); void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;} # define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i)) #else -# define ARM_EMIT(p, i) do {*(arminstr_t*)p = (arminstr_t)i; ((arminstr_t*)p)++;} while (0) +# define ARM_EMIT(p, i) do { arminstr_t *__ainstrp = (void*)(p); *__ainstrp = (arminstr_t)(i); (p) = (void*)(__ainstrp+1);} while (0) #endif #if defined(_MSC_VER) && !defined(ARM_NOIASM) -- cgit v1.1 From 259b4749eaf68bfd6818ab38df91e37239c5dd45 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 13 Dec 2005 19:12:20 +0000 Subject: Continuing to bring s390 up to current levels svn path=/trunk/mono/; revision=54312 --- s390/ChangeLog | 4 ++++ s390/s390-codegen.h | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/s390/ChangeLog b/s390/ChangeLog index 5186c80..1e64bd7 100644 --- a/s390/ChangeLog +++ b/s390/ChangeLog @@ -1,3 +1,7 @@ +2005-12-13 Neale Ferguson + + * s390-codegen.h: Add some new instructions (conditional jumps) + 2004-12-15 Neale Ferguson * s390-codegen.h: Add some new instructions (CS, CDS) diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h index e7994f8..6457357 100644 --- a/s390/s390-codegen.h +++ b/s390/s390-codegen.h @@ -155,6 +155,7 @@ typedef enum { #define S390_CC_LT 4 #define S390_CC_GT 2 #define S390_CC_GE 11 +#define S390_CC_NM 11 #define S390_CC_LE 13 #define S390_CC_OV 1 #define S390_CC_NO 14 @@ -575,6 +576,7 @@ typedef struct __attribute__ ((packed)) { #define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) #define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) #define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) +#define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2) #define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) #define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) #define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) @@ -615,8 +617,11 @@ typedef struct __attribute__ ((packed)) { #define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) #define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) #define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jno(c, d) s390_brc(c, S390_CC_NO, d) #define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) #define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d) +#define s390_jnc(c, d) s390_brc(c, S390_CC_NC, d) #define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) #define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) #define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) -- cgit v1.1 From 417b7fbe8f810e8fd62b2cb805164a3b80a536d6 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 22 Dec 2005 20:18:18 +0000 Subject: 2005-12-22 Zoltan Varga * sparc/sparc-codegen.h (sparc_membar): Add membar instruction. svn path=/trunk/mono/; revision=54750 --- ChangeLog | 4 ++++ sparc/sparc-codegen.h | 15 +++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/ChangeLog b/ChangeLog index 79fc9c3..5be42bb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2005-12-22 Zoltan Varga + + * sparc/sparc-codegen.h (sparc_membar): Add membar instruction. + 2005-10-30 Zoltan Varga * ia64/ia64-codegen.h (ia64_m17): Fix a warning. 
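A usage sketch for the new macro (my example, not from the patch): the SparcMembarFlags bits added in the diff below select which ordering constraints the barrier enforces, and they can be OR'ed together.

    /* illustrative; "code" is a guint32* instruction cursor as elsewhere in sparc-codegen.h */
    sparc_membar (code, sparc_membar_store_load);                            /* StoreLoad ordering only */
    sparc_membar (code, sparc_membar_load_load | sparc_membar_store_store);  /* flags combine by OR */
    sparc_membar (code, sparc_membar_all);                                   /* 0x4f: the four ordering bits plus sync */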
diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 2e447a4..38ccb42 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -219,6 +219,19 @@ typedef enum { sparc_fcmpeq_val = 87 } SparcFOp; +typedef enum { + sparc_membar_load_load = 0x1, + sparc_membar_store_load = 0x2, + sparc_membar_load_store = 0x4, + sparc_membar_store_store = 0x8, + + sparc_membar_lookaside = 0x10, + sparc_membar_memissue = 0x20, + sparc_membar_sync = 0x40, + + sparc_membar_all = 0x4f +} SparcMembarFlags; + typedef struct { unsigned int op : 2; /* always 1 */ unsigned int disp : 30; @@ -649,6 +662,8 @@ typedef struct { #define sparc_flushw(ins) sparc_encode_format3a((ins),2,0,0,0,43,0) +#define sparc_membar(ins,flags) sparc_encode_format3b ((ins), 2, 0xf, (flags), 0x28, 0) + /* trap */ #define sparc_ta(ins,tt) sparc_encode_format3b((ins),2,0,(tt),58,0x8) -- cgit v1.1 From 1092c74e7a468b7761df92c2dc0dd2f2b49f21e6 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 3 Jan 2006 19:40:34 +0000 Subject: * mono/io-layer/ChangeLog, mono/io-layer/atomic.h, mono/mini/mini-s390x.c, mono/mini/mini-s390x.h, mono/mini/exceptions-s390x.c, mono/mini/ChangeLog, mono/mini/s390-abi.cs, mono/mini/tramp-s390x.c, mono/mini/inssel-s390x.brg, mono/mini/cpu-s390x.md, mono/mini/mini-codegen.c mono/mini/basic-long.cs, mono/mini/Makefile.am, mono/arch/s390x/ChangeLog mono/arch/s390x/s390x-codegen.h: 64-bit s390 support svn path=/trunk/mono/; revision=55020 --- s390x/ChangeLog | 4 +++ s390x/s390x-codegen.h | 86 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 62 insertions(+), 28 deletions(-) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index 1cb7ef6..e9ba775 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,3 +1,7 @@ +2006-01-03 Neale Ferguson + + * s390x-codegen.h: Add some new instructions. 
+ 2004-12-15 Neale Ferguson * s390x-codegen.h: Add some new instructions (CS, CSG, CSY, CDS, CDSG, CDSY) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index ce9281c..00331ba 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -26,12 +26,12 @@ /*------------------------------------------------------------------*/ #define ADD_ISTACK_PARM(r, i) \ if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ + s390_lay (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ reg_param += (i); \ } else { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + s390_lay (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ stack_param += (i); \ } @@ -40,13 +40,13 @@ /*------------------------------------------------------------------*/ #define ADD_RSTACK_PARM(i) \ if (fpr_param < FLOAT_REGS) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - float_pos + (fpr_param * sizeof(float) * (i))); \ + s390_lay (p, s390_r4, 0, STK_BASE, \ + float_pos + (fpr_param * sizeof(float) * (i))); \ fpr_param++; \ } else { \ stack_param += (stack_param % (i)); \ - s390_la (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \ + s390_lay (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \ stack_param += (i); \ } @@ -55,25 +55,26 @@ /*------------------------------------------------------------------*/ #define ADD_TSTACK_PARM \ if (reg_param < GENERAL_REGS) { \ - s390_l (p, s390_r4, 0, STK_BASE, \ + s390_ly (p, s390_r4, 0, STK_BASE, \ local_start + (reg_param - this_flag) * sizeof(long)); \ reg_param++; \ } else { \ - s390_l (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + s390_ly (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ stack_param++; \ } #define ADD_PSTACK_PARM(r, i) \ if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ + s390_lay (p, s390_r4, 0, STK_BASE, \ + local_start + (reg_param - this_flag) * sizeof(long)); \ reg_param += (i); \ } else { \ - s390_l (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ + s390_ly (p, s390_r4, 0, STK_BASE, \ + sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ stack_param++; \ } + typedef enum { s390_r0 = 0, s390_r1, @@ -135,12 +136,13 @@ typedef enum { s390_fpc = 256, } S390SpecialRegister; -#define s390_is_imm16(val) ((gint)val >= (gint)-(1<<15) && \ - (gint)val <= (gint)((1<<15)-1)) -#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 65535) -#define s390_is_imm12(val) ((gint)val >= (gint)-(1<<11) && \ - (gint)val <= (gint)((1<<15)-1)) -#define s390_is_uimm12(val) ((gint)val >= 0 && (gint)val <= 4095) +#define s390_is_imm16(val) ((glong)val >= (glong)-(1<<15) && \ + (glong)val <= (glong)((1<<15)-1)) +#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= 65535) +#define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575) +#define s390_is_imm12(val) ((glong)val >= (glong)-(1<<11) && \ + (glong)val <= (glong)((1<<15)-1)) +#define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095) #define STK_BASE s390_r15 #define S390_MINIMAL_STACK_SIZE 160 @@ -155,6 +157,7 @@ typedef enum { #define S390_CC_LT 4 #define S390_CC_GT 2 #define S390_CC_GE 11 +#define 
S390_CC_NM 11 #define S390_CC_LE 13 #define S390_CC_OV 1 #define S390_CC_NO 14 @@ -572,6 +575,7 @@ typedef struct { #define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) #define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) #define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d) +#define s390_agf(c, r, x, b, d) S390_RXY(c, 0xe318, r, x, b, d) #define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v) #define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2) #define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) @@ -579,10 +583,13 @@ typedef struct { #define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) #define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) #define s390_alg(c, r, x, b, d) S390_RXY(c, 0xe30a, r, x, b, d) +#define s390_algf(c, r, x, b, d) S390_RXY(c, 0xe31a, r, x, b, d) #define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2) #define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) #define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) #define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) +#define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2) +#define s390_bctrg(c, r1, r2) S390_RRE(c, 0xb946, r1, r2) #define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) #define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) #define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) @@ -592,11 +599,13 @@ typedef struct { #define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) #define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) #define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) +#define s390_cdgbr(c, r1, r2) S390_RRE(c, 0xb3a5, r1, r2) #define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d) -#define s390_cdsg(c, r1, r2, b, d) S390_RSY(c, 0xeb3e, r1, r2, b, d) -#define s390_cdsy(c, r1, r2, b, d) S390_RSY(c, 0xeb31, r1, r2, b, d) +#define s390_cdsg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb3e, r1, r2, b, d) +#define s390_cdsy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb31, r1, r2, b, d) #define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) #define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) +#define s390_cgdbr(c, r1, m, r2) S390_RRF_2(c, 0xb3a9, r1, m, r2) #define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d) #define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i) #define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2) @@ -607,8 +616,8 @@ typedef struct { #define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) #define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) #define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d) -#define s390_csg(c, r1, r2, b, d) S390_RSY(c, 0xeb30, r1, r2, b, d) -#define s390_csy(c, r1, r2, b, d) S390_RSY(c, 0xeb14, r1, r2, b, d) +#define s390_csg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb30, r1, r2, b, d) +#define s390_csy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb14, r1, r2, b, d) #define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) #define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) #define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) @@ -620,9 +629,12 @@ typedef struct { #define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2) #define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) #define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) -#define s390_jc(c, m, d) s390_brc(c, m, d) +#define s390_icmy(c, r, x, b, d) S390_RXY(c, 0xeb81, r, x, b, d) +#define s390_icy(c, r, x, b, d) S390_RXY(c, 0xe373, r, x, b, d) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) +#define s390_jc(c, m, d) s390_brc(c, m, d) #define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) +#define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d) #define s390_je(c, d) 
s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) #define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) @@ -630,37 +642,45 @@ typedef struct { #define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) #define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) #define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) +#define s390_jnc(c, d) s390_brc(c, S390_CC_NC, d) #define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) #define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) #define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) #define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) #define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) +#define s390_jno(c, d) s390_brc(c, S390_CC_NO, d) #define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) #define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) +#define s390_ly(c, r, x, b, d) S390_RXY(c, 0xe358, r, x, b, d) #define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) +#define s390_lay(c, r, x, b, d) S390_RXY(c, 0xe371, r, x, b, d) #define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) #define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) #define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d) #define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) #define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2) #define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) -#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) #define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) +#define s390_ldy(c, r, x, b, d) S390_RXY(c, 0xed65, r, x, b, d) #define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) #define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) #define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) #define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) #define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) #define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) +#define s390_ley(c, r, x, b, d) S390_RXY(c, 0xed64, r, x, b, d) #define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d) #define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) #define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) #define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) +#define s390_lgh(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) #define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v) #define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) #define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) #define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) #define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) +#define s390_lhy(c, r, x, b, d) S390_RXY(c, 0xe378, r, x, b, d) #define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d) #define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d) #define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2) @@ -706,15 +726,18 @@ typedef struct { #define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) #define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) #define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d) +#define s390_sgf(c, r, x, b, d) S390_RXY(c, 0xe319, r, x, b, d) #define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2) #define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) #define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d) +#define s390_slbg(c, r, x, b, d) S390_RXY(c, 0xe389, r, x, b, d) #define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2) #define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) #define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) #define 
s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) #define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) #define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d) +#define s390_slgf(c, r, x, b, d) S390_RXY(c, 0xe31b, r, x, b, d) #define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2) #define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) #define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d) @@ -728,17 +751,24 @@ typedef struct { #define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) #define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) #define s390_srlg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0c, r1, r2, b, d) +#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) #define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) #define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) #define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) -#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) +#define s390_stcmy(c, r, x, b, d) S390_RXY(c, 0xeb2d, r, x, b, d) +#define s390_stcy(c, r, x, b, d) S390_RXY(c, 0xe372, r, x, b, d) #define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) +#define s390_stdy(c, r, x, b, d) S390_RXY(c, 0xed67, r, x, b, d) #define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) +#define s390_stey(c, r, x, b, d) S390_RXY(c, 0xed66, r, x, b, d) #define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) #define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d) +#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d) #define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) +#define s390_sthy(c, r, x, b, d) S390_RXY(c, 0xe370, r, x, b, d) #define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) #define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d) +#define s390_sty(c, r, x, b, d) S390_RXY(c, 0xe350, r, x, b, d) #define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) #define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) #define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) -- cgit v1.1 From 15bc8b574c91bfaa40cd1d83374d0179148b5894 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 6 Jan 2006 18:52:21 +0000 Subject: * s390x-codegen.h: Add lpdbr instruction (OP_ABS). * mini-s390x.c, inssel-s390x.brg, cpu-s390x.md: Fix ATOMIC_I8 operations. Provide initial support for OP_ABS. svn path=/trunk/mono/; revision=55158 --- s390x/ChangeLog | 4 ++++ s390x/s390x-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index e9ba775..c2e9ee9 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,3 +1,7 @@ +2006-01-06 Neale Ferguson + + * s390x-codegen.h: Add lpdbr instruction (OP_ABS). + 2006-01-03 Neale Ferguson * s390x-codegen.h: Add some new instructions. diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 00331ba..8aefa46 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -690,6 +690,7 @@ typedef struct { #define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) #define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2) #define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) +#define s390_lpdbr(c, r1, r2) S390_RRE(c, 0xb310, r1, r2) #define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2) #define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) #define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) -- cgit v1.1 From 0d566f3cb37ddf731fba6cfce9741e2224a13d77 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Mon, 13 Mar 2006 22:03:39 +0000 Subject: * s390x-codegen.h: Fix immediate checks. 
svn path=/trunk/mono/; revision=57914 --- s390x/ChangeLog | 4 ++++ s390x/s390x-codegen.h | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index c2e9ee9..e05279d 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,3 +1,7 @@ +2006-03-13 Neale Ferguson + + * s390x-codegen.h: Fix immediate checks. + 2006-01-06 Neale Ferguson * s390x-codegen.h: Add lpdbr instruction (OP_ABS). diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 8aefa46..9ef7475 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -140,8 +140,9 @@ typedef enum { (glong)val <= (glong)((1<<15)-1)) #define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= 65535) #define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575) +#define s390_is_imm20(val) ((glong)val >= -524288 && (glong)val <= 524287) #define s390_is_imm12(val) ((glong)val >= (glong)-(1<<11) && \ - (glong)val <= (glong)((1<<15)-1)) + (glong)val <= (glong)((1<<11)-1)) #define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095) #define STK_BASE s390_r15 -- cgit v1.1 From a65cd014e420a38b47e00f5c6f9ce590fc00987b Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 4 Apr 2006 13:18:49 +0000 Subject: 2006-04-04 Zoltan Varga * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the interpreter. svn path=/trunk/mono/; revision=59009 --- ChangeLog | 5 +++++ Makefile.am | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 5be42bb..cda1dfb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2006-04-04 Zoltan Varga + + * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the + interpreter. + 2005-12-22 Zoltan Varga * sparc/sparc-codegen.h (sparc_membar): Add membar instruction. diff --git a/Makefile.am b/Makefile.am index e006bec..1d33d32 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,9 +1,11 @@ -SUBDIRS = $(arch_target) DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) if INTERP_SUPPORTED + +SUBDIRS = $(arch_target) + noinst_LTLIBRARIES = libmonoarch.la libmonoarch_la_SOURCES = unknown.c -- cgit v1.1 From e830aadb2febf62051b8fc162884a909087cfe4e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 12 Apr 2006 19:02:09 +0000 Subject: 2006-04-12 Zoltan Varga * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro. svn path=/trunk/mono/; revision=59415 --- ChangeLog | 4 ++++ sparc/sparc-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index cda1dfb..785c48e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2006-04-12 Zoltan Varga + + * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro. 
+ 2006-04-04 Zoltan Varga * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index 38ccb42..c04f5ce 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -381,6 +381,7 @@ typedef struct { #define sparc_inst_op2(inst) (((inst) >> 22) & 0x7) #define sparc_inst_rd(inst) (((inst) >> 25) & 0x1f) #define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f) +#define sparc_inst_i(inst) (((inst) >> 13) & 0x1) #define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f) #define sparc_inst_rs2(inst) (((inst) >> 0) & 0x1f) #define sparc_inst_imm(inst) (((inst) >> 13) & 0x1) -- cgit v1.1 From 3b274ddc5c946640a4c0d6a7b2dee13cd2f5096d Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 21 Apr 2006 14:51:24 +0000 Subject: 2006-04-21 Zoltan Varga * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old behaviour. svn path=/trunk/mono/; revision=59758 --- ChangeLog | 5 +++++ Makefile.am | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 785c48e..94c1b2b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2006-04-21 Zoltan Varga + + * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old + behaviour. + 2006-04-12 Zoltan Varga * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro. diff --git a/Makefile.am b/Makefile.am index 1d33d32..70b24b9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -2,15 +2,17 @@ DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -if INTERP_SUPPORTED - +# arm needs to build some stuff even in JIT mode SUBDIRS = $(arch_target) +if INTERP_SUPPORTED + noinst_LTLIBRARIES = libmonoarch.la libmonoarch_la_SOURCES = unknown.c libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la + endif EXTRA_DIST = ChangeLog -- cgit v1.1 From de54a3e44b1214298b39386b49e1ca992176e2e4 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 14 May 2006 18:51:25 +0000 Subject: 2006-05-14 Zoltan Varga * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this opcode. svn path=/trunk/mono/; revision=60695 --- ChangeLog | 5 +++++ ia64/ia64-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 94c1b2b..21a8c34 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2006-05-14 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this + opcode. + 2006-04-21 Zoltan Varga * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index d3b8aae..6265451 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1565,7 +1565,7 @@ typedef enum { #define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm; read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); aimm = (imm) < 0 ? 
- (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0) #define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) -#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12) +#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x13) #define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16) #define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17) -- cgit v1.1 From ef8021400f045f835fcf70baf5ba5880fe6eca93 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Thu, 15 Jun 2006 15:00:59 +0000 Subject: Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro * ppc/ppc-codegen.h: reduce noisy build warnings by casting to the more commonly used unsigned char type (from johannes@sipsolutions.net (Johannes Berg)). svn path=/trunk/mono/; revision=61757 --- ChangeLog | 7 +++++++ ppc/ppc-codegen.h | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 21a8c34..48bea16 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,10 @@ + +Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro + + * ppc/ppc-codegen.h: reduce noisy build warnings by + casting to the more commonly used unsigned char type + (from johannes@sipsolutions.net (Johannes Berg)). + 2006-05-14 Zoltan Varga * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index aa85d45..c3181e7 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -117,7 +117,7 @@ enum { PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ }; -#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (char *)(c) + sizeof (guint32);} while (0) +#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (guint8 *)(c) + sizeof (guint32);} while (0) #define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) #define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) -- cgit v1.1 From 8f58fa13418008cb86a8ba450a894b23efc4574e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 19 Jul 2006 12:09:09 +0000 Subject: 2006-07-19 Zoltan Varga * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from Sergey Tikhonov . Updates to alpha support. svn path=/trunk/mono/; revision=62745 --- ChangeLog | 4 +++ alpha/alpha-codegen.h | 84 ++++++++++++++++++++++++++++++++++++++------------- alpha/test.c | 16 ++++++---- alpha/tramp.c | 46 ++++++++++++++++------------ 4 files changed, 103 insertions(+), 47 deletions(-) diff --git a/ChangeLog b/ChangeLog index 48bea16..c2c4f24 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2006-07-19 Zoltan Varga + + * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from + Sergey Tikhonov . Updates to alpha support. 
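The ppc_emit32 change above is worth a standalone illustration: the macro stores a 32-bit opcode and then advances the cursor by four bytes, so the cursor has to be cast to a byte-sized pointer type before the addition. A minimal sketch, with guint8/guint32 standing in for the glib fixed-width types:

#include <stdio.h>

typedef unsigned char guint8;
typedef unsigned int  guint32;

/* store a 32-bit instruction word, then step the cursor 4 bytes forward */
#define ppc_emit32(c,x) do { *((guint32 *) (c)) = (x); (c) = (guint8 *)(c) + sizeof (guint32); } while (0)

/* "ori r0,r0,0", the canonical ppc nop */
#define ppc_nop(c) ppc_emit32 ((c), 0x60000000)

int main (void)
{
	guint8 buf [16], *p = buf;

	ppc_nop (p);
	ppc_nop (p);
	printf ("emitted %d bytes\n", (int) (p - buf));   /* 8 */
	return 0;
}

Casting through char works too; the guint8 cast just matches the type most callers already use for their code cursor, which is what removes the warnings the ChangeLog entry mentions.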
Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index 26dc591..194c327 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -160,7 +160,6 @@ typedef enum { #define __alpha_int_32 unsigned int - /***************************************/ #define AXP_OFF26_MASK 0x03ffffff #define AXP_OFF21_MASK 0x01fffff @@ -210,7 +209,7 @@ typedef enum { ((func & AXP_OFF7_MASK) << AXP_REGSIZE) #define alpha_op_literal( lit ) \ - ((lit & AXP_OFF7_MASK) << AXP_LIT_SHIFT) + ((lit & AXP_OFF8_MASK) << AXP_LIT_SHIFT) #define alpha_mem_br_func( func, hint ) \ (((func & AXP_OFF2_MASK ) << AXP_MEM_BR_SHIFT ) | (hint&AXP_OFF14_MASK)) @@ -236,48 +235,56 @@ typedef enum { #define alpha_encode_palcall( ins, op, func ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | ( func & AXP_OFF26_MASK )) + *((__alpha_int_32*)(ins)) = ( 0 |\ + alpha_opcode( op ) | ( func & AXP_OFF26_MASK )),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_mem( ins, op, Rdest, Rsrc, offset ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ - alpha_reg_b( Rsrc ) | (offset & AXP_OFF16_MASK )) + alpha_reg_b( Rsrc ) | (offset & AXP_OFF16_MASK )),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_mem_fc( ins, op, func, Rdest, Rsrc, offset ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ - alpha_reg_b( Rsrc ) | alpha_mem_fc_func( func )) + alpha_reg_b( Rsrc ) | alpha_mem_fc_func( func )),\ + *((__alpha_int_32*)(ins))++ #define alpha_encode_mem_br( ins, op, func, Rdest, Rsrc, hint ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ - alpha_reg_b( Rsrc ) | alpha_mem_br_func( func, hint ) ) + alpha_reg_b( Rsrc ) | alpha_mem_br_func( func, hint ) ),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_branch( ins, op, Reg, offset ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Reg ) | \ - (offset & AXP_OFF21_MASK )) + (offset & AXP_OFF21_MASK )),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_op( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ alpha_reg_b( Rsrc2 ) | alpha_op_func( func ) | \ - alpha_reg_c( Rdest )) + alpha_reg_c( Rdest )),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_opl( ins, op, func, Rsrc, lit, Rdest ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ + *((__alpha_int_32*)(ins)) = ( 0 |\ + alpha_opcode( op ) | alpha_reg_a( Rsrc ) | \ alpha_op_literal(lit) | ( 1 << 12 ) | \ - alpha_op_func( func ) | alpha_reg_c( Rdest ) ) + alpha_op_func( func ) | alpha_reg_c( Rdest ) ),\ + ((__alpha_int_32*)(ins))++ #define alpha_encode_fpop( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ - *((__alpha_int_32*)(ins))++ = ( 0 |\ + *((__alpha_int_32*)(ins)) = ( 0 |\ alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ alpha_reg_b( Rsrc2 ) | alpha_fp_func( func ) | \ - alpha_reg_c( Rdest )) + alpha_reg_c( Rdest )),\ + ((__alpha_int_32*)(ins))++ /***************************************/ @@ -504,13 +511,48 @@ typedef enum { #define alpha_extqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x7a, Rsrc1, Rsrc2, Rdest ) #define alpha_extqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x7a, Rsrc1, lit, Rdest ) +#define alpha_mull(ins, 
Rsrc1, Rsrc2, Rdest) alpha_encode_op( ins, 0x13, 0x00, Rsrc1, Rsrc2, Rdest ) +#define alpha_mull_(ins, Rsrc1, lit, Rdest) alpha_encode_op( ins, 0x13, 0x00, Rsrc1, lit, Rdest ) +#define alpha_mulq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, Rsrc2, Rdest ) +#define alpha_mulq_(ins, Rsrc1, lit, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, lit, Rdest ) + +// For 264 #define alpha_ftois( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x078, RFsrc, alpha_zero, Rdest ) #define alpha_ftoit( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) #define alpha_ftoi_qf( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) - +// For 264 #define alpha_itofs( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x004, Rsrc, alpha_zero, RFdest ) #define alpha_itoff( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x014, Rsrc, alpha_zero, RFdest ) #define alpha_itoft( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) #define alpha_itof_qf( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) -#endif \ No newline at end of file +#define alpha_cvtts_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x2C, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvttq_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x2F, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvtqs_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x3C, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvtqt_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x3E, alpha_fzero, Rsrc2, Rdest ) + + +#define alpha_adds(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x080, Rsrc1, Rsrc2, Rdest ) +#define alpha_subs(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x081, Rsrc1, Rsrc2, Rdest ) +#define alpha_addt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A0, Rsrc1, Rsrc2, Rdest ) +#define alpha_subt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A1, Rsrc1, Rsrc2, Rdest ) +#define alpha_mult(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A2, Rsrc1, Rsrc2, Rdest ) +#define alpha_divt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A3, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptun(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A4, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmpteq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A5, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptlt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A6, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptle(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A7, Rsrc1, Rsrc2, Rdest ) + +#define alpha_cvtts(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AC, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvttq(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AF, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvtqs(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BC, alpha_fzero, Rsrc2, Rdest ) +#define alpha_cvtqt(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BE, alpha_fzero, Rsrc2, Rdest ) + + +#define alpha_cpys(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x020, Rsrc1, Rsrc2, Rdest ) +#define alpha_cpysn(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x021, Rsrc1, Rsrc2, Rdest ) +#define alpha_cpyse(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x022, Rsrc1, Rsrc2, Rdest ) + + +#endif + diff --git a/alpha/test.c b/alpha/test.c index 27db190..b922750 100644 --- a/alpha/test.c +++ b/alpha/test.c @@ -5,6 +5,8 @@ 
#include #include #include +#include +#include /* A typical Alpha stack frame looks like this */ /* @@ -39,13 +41,13 @@ fun..ng: // called from inside the module. // // Simple function which returns 10. // -int testfunc() +static int testfunc(void) { return 10; } // Write it using the known asm bytecodes. -char * write_testfunc_1( char * p ) +static unsigned int * write_testfunc_1(unsigned int * p ) { // // ldah gp, 0(pv) @@ -81,7 +83,7 @@ int _func_code[] = { } // The same function encoded with alpha-codegen.h -char * write_testfunc_2( char * p ) +unsigned int * write_testfunc_2( unsigned int * p ) { alpha_ldah( p, alpha_gp, alpha_pv, 0 ); // start the gp load alpha_lda( p, alpha_sp, alpha_sp, -16 ); // allocate the stack @@ -118,10 +120,12 @@ void output( char * p, int len ) close( fd ); } +unsigned int code [16000/4]; + int main( int argc, char ** argv ) { - char code [16000]; - char *p = code; - char * cp; +// unsigned int code [16000/4]; + unsigned int *p = code; + unsigned int * cp; int (*x)() = 0; int y = 0; diff --git a/alpha/tramp.c b/alpha/tramp.c index 7d9fe02..23c3846 100644 --- a/alpha/tramp.c +++ b/alpha/tramp.c @@ -111,9 +111,10 @@ call_func..ng: /* */ /* void func (void (*callme)(), void *retval, */ /* void *this_obj, stackval *arguments); */ -static inline guint8 * -emit_prolog (guint8 *p, const gint SIZE, int hasthis ) +static inline unsigned int * +emit_prolog (unsigned int *pi, const gint SIZE, int hasthis ) { + unsigned int *p = (unsigned int *)pi; // 9 instructions. alpha_ldah( p, alpha_gp, alpha_pv, 0 ); alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(pv) @@ -127,9 +128,9 @@ emit_prolog (guint8 *p, const gint SIZE, int hasthis ) alpha_a2: will be moved into alpha_a0... if hasthis is true. */ /* store parameters on stack.*/ - alpha_stq( p, alpha_ra, alpha_sp, SIZE-24 ); // ra - alpha_stq( p, alpha_fp, alpha_sp, SIZE-16 ); // fp - alpha_stq( p, alpha_a1, alpha_sp, SIZE-8 ); // retval + alpha_stq( p, alpha_ra, alpha_sp, (SIZE-24) ); // ra + alpha_stq( p, alpha_fp, alpha_sp, (SIZE-16) ); // fp + alpha_stq( p, alpha_a1, alpha_sp, (SIZE-8) ); // retval /* set the frame pointer */ alpha_mov1( p, alpha_sp, alpha_fp ); @@ -144,9 +145,11 @@ emit_prolog (guint8 *p, const gint SIZE, int hasthis ) return p; } -static inline guint8 * -emit_call( guint8 *p , const gint SIZE ) +static inline unsigned int * +emit_call( unsigned int *pi , const gint SIZE ) { + unsigned int *p = (unsigned int *)pi; + // 3 instructions /* call func */ alpha_jsr( p, alpha_ra, alpha_pv, 0 ); // jsr ra, 0(pv) @@ -158,29 +161,32 @@ emit_call( guint8 *p , const gint SIZE ) return p; } -static inline guint8 * -emit_store_return_default(guint8 *p, const gint SIZE ) +static inline unsigned int * +emit_store_return_default(unsigned int *pi, const gint SIZE ) { // 2 instructions. + unsigned int *p = (unsigned int *)pi; /* TODO: This probably do different stuff based on the value. you know, like stq/l/w. and s/f. */ - alpha_ldq( p, alpha_t0, alpha_fp, SIZE-8 ); // load void * retval + alpha_ldq( p, alpha_t0, alpha_fp, (SIZE-8) ); // load void * retval alpha_stq( p, alpha_v0, alpha_t0, 0 ); // store the result to *retval. return p; } -static inline guint8 * -emit_epilog (guint8 *p, const gint SIZE ) +static inline unsigned int * +emit_epilog (unsigned int *pi, const gint SIZE ) { + unsigned int *p = (unsigned int *)pi; + // 5 instructions. 
alpha_mov1( p, alpha_fp, alpha_sp ); /* restore fp, ra, sp */ - alpha_ldq( p, alpha_ra, alpha_sp, SIZE-24 ); - alpha_ldq( p, alpha_fp, alpha_sp, SIZE-16 ); + alpha_ldq( p, alpha_ra, alpha_sp, (SIZE-24) ); + alpha_ldq( p, alpha_fp, alpha_sp, (SIZE-16) ); alpha_lda( p, alpha_sp, alpha_sp, ((SIZE & 8) ? (SIZE+8) : SIZE) ); /* return */ @@ -215,8 +221,8 @@ static void calculate_size(MonoMethodSignature *sig, int * INSTRUCTIONS, int * S MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) { - unsigned char *p; - unsigned char *buffer; + unsigned int *p; + unsigned int *buffer; MonoType* param; int i, pos; @@ -240,7 +246,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) // allocate. - buffer = p = malloc(BUFFER_SIZE); + buffer = p = (unsigned int *)malloc(BUFFER_SIZE); memset( buffer, 0, BUFFER_SIZE ); pos = 8 * (sig->param_count - alpharegs - 1); @@ -264,7 +270,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) else { // load into register - alpha_ldq( p, regbase + i, alpha_t0, ARG_LOC( i ) ); + alpha_ldq( p, (regbase + i), alpha_t0, ARG_LOC( i ) ); } } else @@ -299,7 +305,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) else { // load into register - alpha_ldl( p, regbase + i, alpha_t0, ARG_LOC(i) ); + alpha_ldl( p, (regbase + i), alpha_t0, (ARG_LOC(i)) ); } break; case MONO_TYPE_I: @@ -321,7 +327,7 @@ mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) else { // load into register - alpha_ldq( p, regbase + i, alpha_t0, ARG_LOC(i) ); + alpha_ldq( p, (regbase + i), alpha_t0, ARG_LOC(i) ); } break; case MONO_TYPE_R4: -- cgit v1.1 From 207e90216277d1d1ee0e6cd37f183440c8c39a26 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 19 Jul 2006 12:10:43 +0000 Subject: 2006-07-19 Zoltan Varga * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg. svn path=/trunk/mono/; revision=62746 --- ChangeLog | 2 ++ amd64/amd64-codegen.h | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index c2c4f24..83baac5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2006-07-19 Zoltan Varga + * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg. + * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from Sergey Tikhonov . Updates to alpha support. 
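On the alpha emitter rewrite above: the old form *((__alpha_int_32*)(ins))++ = ... treats a cast as an lvalue, which modern compilers reject; the patch sequences the store before the pointer bump with the comma operator (the increment itself still leans on the old GCC cast-as-lvalue extension). A strictly conforming sketch of the same emit pattern, for reference only:

#include <stdio.h>

/* store one 32-bit Alpha instruction word and advance the code cursor */
static unsigned int *emit32 (unsigned int *ins, unsigned int word)
{
	*ins = word;
	return ins + 1;   /* instructions are fixed-width 32-bit on Alpha */
}

int main (void)
{
	unsigned int code [4], *p = code;

	p = emit32 (p, 0x47ff041f);   /* bis $31,$31,$31, the canonical nop */
	printf ("%#010x, %d words emitted\n", code [0], (int) (p - code));
	return 0;
}

The matching test.c/tramp.c changes in the patch switch the cursors from char * to unsigned int * for the same reason: the buffer is a sequence of whole instruction words.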
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 261d3b2..b734932 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -186,6 +186,20 @@ typedef union { x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) +#define amd64_mov_mem_reg(inst,mem,reg,size) \ + do { \ + if ((size) == 2) \ + *(inst)++ = (unsigned char)0x66; \ + amd64_emit_rex(inst, (size), (reg), 0, 0); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ + } while (0) #define amd64_mov_reg_reg(inst,dreg,reg,size) \ do { \ @@ -768,7 +782,7 @@ typedef union { #define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) #define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) #define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) -#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) +//#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) //#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) //#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) #define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) -- cgit v1.1 From deacad246a936216f09a81b9881c6780de8dd406 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 12 Sep 2006 10:05:29 +0000 Subject: 2006-09-12 Zoltan Varga * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops. svn path=/trunk/mono/; revision=65289 --- ChangeLog | 4 ++++ ia64/ia64-codegen.h | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/ChangeLog b/ChangeLog index 83baac5..f4e6c6c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2006-09-12 Zoltan Varga + + * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops. + 2006-07-19 Zoltan Varga * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg. 
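The fixed amd64_mov_mem_reg above needs the SIB escape because on amd64 the bare disp32 ModRM form (mod=00, r/m=101) was redefined as RIP-relative; encoding base=101 with no index inside a SIB byte recovers a plain 32-bit absolute address, which is exactly what the two x86_address_byte calls produce. A sketch of the expected bytes for a 4-byte store of EAX to 0x1000 (an illustrative check, not taken from the test suite):

#include <stdio.h>

int main (void)
{
	/* mov %eax, 0x1000: opcode 0x89, ModRM 0x04 (mod=00, reg=eax,
	 * rm=100 selects a SIB byte), SIB 0x25 (no index, base=101 =>
	 * disp32), then the absolute address in little-endian order */
	unsigned char expected [] = { 0x89, 0x04, 0x25, 0x00, 0x10, 0x00, 0x00 };
	int i;

	for (i = 0; i < (int) sizeof (expected); ++i)
		printf ("%02x ", expected [i]);
	printf ("\n");
	return 0;
}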
diff --git a/ia64/ia64-codegen.h b/ia64/ia64-codegen.h index 6265451..1793580 100644 --- a/ia64/ia64-codegen.h +++ b/ia64/ia64-codegen.h @@ -1791,6 +1791,12 @@ typedef enum { #define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3) #define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2) +/* Pseudo ops */ +#define ia64_xmpy_l_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0) +#define ia64_xmpy_lu_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0) +#define ia64_xmpy_h_pred(code, qp, f1, f3, f4) ia64_xma_h_pred ((code), (qp), (f1), (f3), (f4), 0) +#define ia64_xmpy_hu_pred(code, qp, f1, f3, f4) ia64_xma_hu_pred ((code), (qp), (f1), (f3), (f4), 0) + #define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0) #define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0) @@ -3078,6 +3084,12 @@ typedef enum { #define ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2) #define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2) +/* Pseudo ops */ +#define ia64_xmpy_l(code, f1, f3, f4) ia64_xmpy_l_pred ((code), 0, (f1), (f3), (f4)) +#define ia64_xmpy_lu(code, f1, f3, f4) ia64_xmpy_lu_pred ((code), 0, (f1), (f3), (f4)) +#define ia64_xmpy_h(code, f1, f3, f4) ia64_xmpy_h_pred ((code), 0, (f1), (f3), (f4)) +#define ia64_xmpy_hu(code, f1, f3, f4) ia64_xmpy_hu_pred ((code), 0, (f1), (f3), (f4)) + #define ia64_fselect(code, f1, f3, f4, f2) ia64_fselect_pred ((code), 0, f1, f3, f4, f2) #define ia64_fcmp_eq_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), 0, p1, p2, f2, f3, sf) -- cgit v1.1 From 0689ca5f72fa8cb03fb1b565a31c4e2b22774a64 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Tue, 12 Sep 2006 11:10:42 +0000 Subject: Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro * arm/*: VFP floating point format code generation support. svn path=/trunk/mono/; revision=65295 --- ChangeLog | 5 + arm/Makefile.am | 11 +- arm/arm-vfp-codegen.h | 183 ++++++++++++++++++++++++++++++ arm/arm_vfpmacros.h | 299 ++++++++++++++++++++++++++++++++++++++++++++++++++ arm/fpaops.sh | 2 +- arm/vfp_macros.th | 15 +++ arm/vfpm_macros.th | 14 +++ arm/vfpops.sh | 24 ++++ 8 files changed, 550 insertions(+), 3 deletions(-) create mode 100644 arm/arm-vfp-codegen.h create mode 100644 arm/arm_vfpmacros.h create mode 100644 arm/vfp_macros.th create mode 100644 arm/vfpm_macros.th create mode 100755 arm/vfpops.sh diff --git a/ChangeLog b/ChangeLog index f4e6c6c..ab85855 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro + + * arm/*: VFP floating point format code generation support. + 2006-09-12 Zoltan Varga * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops. 
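The xmpy pseudo-ops above lean on an architectural constant: ia64 floating register f0 always reads as +0.0, so a multiply is just a multiply-add with f0 as the addend. Note also that ia64_xmpy_lu_pred maps onto ia64_xma_l_pred rather than a separate unsigned form, because the low 64 bits of a product are identical for signed and unsigned operands. A small sketch of the arithmetic identity the encoding relies on:

#include <stdio.h>

/* xma.l computes the low 64 bits of a*b + c; xmpy.l is the same
 * instruction with c pinned to f0, i.e. zero */
static unsigned long long xma_l (unsigned long long a, unsigned long long b,
                                 unsigned long long c)
{
	return a * b + c;   /* modulo 2^64, matching the instruction */
}

int main (void)
{
	printf ("%llu\n", xma_l (6, 7, 0));   /* what xmpy.l would produce */
	return 0;
}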
diff --git a/arm/Makefile.am b/arm/Makefile.am index 38ab336..ba7a60d 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -3,7 +3,7 @@ INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-arm.la -BUILT_SOURCES = arm_dpimacros.h arm_fpamacros.h +BUILT_SOURCES = arm_dpimacros.h arm_fpamacros.h arm_vfpmacros.h libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ @@ -21,6 +21,13 @@ arm_fpamacros.h: fpaops.sh fpam_macros.th fpa_macros.th (cd $(srcdir); bash ./fpaops.sh) > $@t mv $@t $@ +arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th + (cd $(srcdir); bash ./vfpops.sh) > $@t + mv $@t $@ + CLEANFILES = $(BUILT_SOURCES) -EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th fpam_macros.th fpa_macros.th arm-fpa-codegen.h fpaops.sh +EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \ + fpam_macros.th fpa_macros.th arm-fpa-codegen.h fpaops.sh \ + vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh + diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h new file mode 100644 index 0000000..e1f760b --- /dev/null +++ b/arm/arm-vfp-codegen.h @@ -0,0 +1,183 @@ +#ifndef __MONO_ARM_VFP_CODEGEN_H__ +#define __MONO_ARM_VFP_CODEGEN_H__ + +#include "arm-codegen.h" + +enum { + /* FPA registers */ + ARM_VFP_F0, + ARM_VFP_F1, + ARM_VFP_F2, + ARM_VFP_F3, + ARM_VFP_F4, + ARM_VFP_F5, + ARM_VFP_F6, + ARM_VFP_F7, + ARM_VFP_F8, + ARM_VFP_F9, + ARM_VFP_F10, + ARM_VFP_F11, + ARM_VFP_F12, + ARM_VFP_F13, + ARM_VFP_F14, + ARM_VFP_F15, + ARM_VFP_F16, + ARM_VFP_F17, + ARM_VFP_F18, + ARM_VFP_F19, + ARM_VFP_F20, + ARM_VFP_F21, + ARM_VFP_F22, + ARM_VFP_F23, + ARM_VFP_F24, + ARM_VFP_F25, + ARM_VFP_F26, + ARM_VFP_F27, + ARM_VFP_F28, + ARM_VFP_F29, + ARM_VFP_F30, + ARM_VFP_F31, + + ARM_VFP_D0 = ARM_VFP_F0, + ARM_VFP_D1 = ARM_VFP_F2, + ARM_VFP_D2 = ARM_VFP_F4, + ARM_VFP_D3 = ARM_VFP_F6, + ARM_VFP_D4 = ARM_VFP_F8, + ARM_VFP_D5 = ARM_VFP_F10, + ARM_VFP_D6 = ARM_VFP_F12, + ARM_VFP_D7 = ARM_VFP_F14, + ARM_VFP_D8 = ARM_VFP_F16, + ARM_VFP_D9 = ARM_VFP_F18, + ARM_VFP_D10 = ARM_VFP_F20, + ARM_VFP_D11 = ARM_VFP_F22, + ARM_VFP_D12 = ARM_VFP_F24, + ARM_VFP_D13 = ARM_VFP_F26, + ARM_VFP_D14 = ARM_VFP_F28, + ARM_VFP_D15 = ARM_VFP_F30, + + ARM_VFP_COPROC_SINGLE = 10, + ARM_VFP_COPROC_DOUBLE = 11, + +#define ARM_VFP_OP(p,q,r,s) (((p) << 23) | ((q) << 21) | ((r) << 20) | ((s) << 6)) +#define ARM_VFP_OP2(Fn,N) (ARM_VFP_OP (1,1,1,1) | ((Fn) << 16) | ((N) << 7)) + + ARM_VFP_MUL = ARM_VFP_OP (0,1,0,0), + ARM_VFP_NMUL = ARM_VFP_OP (0,1,0,1), + ARM_VFP_ADD = ARM_VFP_OP (0,1,1,0), + ARM_VFP_SUB = ARM_VFP_OP (0,1,1,1), + ARM_VFP_DIV = ARM_VFP_OP (1,0,0,0), + + ARM_VFP_CPY = ARM_VFP_OP2 (0,0), + ARM_VFP_ABS = ARM_VFP_OP2 (0,1), + ARM_VFP_NEG = ARM_VFP_OP2 (1,0), + ARM_VFP_SQRT = ARM_VFP_OP2 (1,1), + ARM_VFP_CMP = ARM_VFP_OP2 (4,0), + ARM_VFP_CMPE = ARM_VFP_OP2 (4,1), + ARM_VFP_CMPZ = ARM_VFP_OP2 (5,0), + ARM_VFP_CMPEZ = ARM_VFP_OP2 (5,1), + ARM_VFP_CVT = ARM_VFP_OP2 (7,1), + ARM_VFP_UITO = ARM_VFP_OP2 (8,0), + ARM_VFP_SITO = ARM_VFP_OP2 (8,1), + ARM_VFP_TOUI = ARM_VFP_OP2 (12,0), + ARM_VFP_TOSI = ARM_VFP_OP2 (13,0), + ARM_VFP_TOUIZ = ARM_VFP_OP2 (12,1), + ARM_VFP_TOSIZ = ARM_VFP_OP2 (13,1), + + ARM_VFP_SID = 0, + ARM_VFP_SCR = 1 << 1, + ARM_VFP_EXC = 8 << 1 +}; + +#define ARM_DEF_VFP_DYADIC(cond,cp,op,Fd,Fn,Fm) \ + (14 << 24) | \ + ((cp) << 8) | \ + (op) | \ + (((Fd) >> 1) << 12) | \ + (((Fd) & 1) << 22) | \ + (((Fn) >> 1) << 16) | \ + (((Fn) & 1) << 7) | \ + (((Fm) >> 1) << 0) | \ + (((Fm) & 1) << 5) | \ + ARM_DEF_COND(cond) + +#define ARM_DEF_VFP_MONADIC(cond,cp,op,Fd,Fm) \ + (14 << 24) | \ + 
((cp) << 8) | \ + (op) | \ + (((Fd) >> 1) << 12) | \ + (((Fd) & 1) << 22) | \ + (((Fm) >> 1) << 0) | \ + (((Fm) & 1) << 5) | \ + ARM_DEF_COND(cond) + +#define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \ + ((offset) >= 0? (offset)>>2: -(offset)>>2) | \ + ((6 << 25) | \ + ((cp) << 8) | \ + (((Fd) >> 1) << 12) | \ + (((Fd) & 1) << 22) | \ + ((basereg) << 16) | \ + ((ls) << 20) | \ + ((wback) << 21) | \ + (((offset) >= 0) << 23) | \ + ((wback) << 21) | \ + ((post) << 24) | \ + ARM_DEF_COND(cond) + +#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) \ + (14 << 24) | \ + (1 << 4) | \ + ((cp) << 8) | \ + ((op) << 21) | \ + ((L) << 20) | \ + ((Rd) << 12) | \ + (((Fn) >> 1) << 16) | \ + (((Fn) & 1) << 7) | \ + ARM_DEF_COND(cond) + +/* FP load and stores */ +#define ARM_FLDS_COND(p,freg,base,offset,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_LDR,0,(base),(freg),(offset))) +#define ARM_FLDS(p,freg,base,offset) \ + ARM_FLDS_COND(p,freg,base,offset,ARMCOND_AL) + +#define ARM_FLDD_COND(p,freg,base,offset,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_LDR,0,(base),(freg),(offset))) +#define ARM_FLDD(p,freg,base,offset) \ + ARM_FLDD_COND(p,freg,base,offset,ARMCOND_AL) + +#define ARM_FSTS_COND(p,freg,base,offset,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_STR,0,(base),(freg),(offset))) +#define ARM_FSTS(p,freg,base,offset) \ + ARM_FSTS_COND(p,freg,base,offset,ARMCOND_AL) + +#define ARM_FSTD_COND(p,freg,base,offset,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_STR,0,(base),(freg),(offset))) +#define ARM_FSTD(p,freg,base,offset) \ + ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL) + +#include "arm_vfpmacros.h" + +#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) +/* coprocessor register transfer */ +#define ARM_FMSR(p,freg,reg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg))) +#define ARM_FMRS(p,reg,freg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,1,(freg),(reg))) + +#define ARM_FMDLR(p,freg,reg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,0,(freg),(reg))) +#define ARM_FMRDL(p,reg,freg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,1,(freg),(reg))) +#define ARM_FMDHR(p,freg,reg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,0,(freg),(reg))) +#define ARM_FMRDH(p,reg,freg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,1,(freg),(reg))) + +#define ARM_FMXR(p,freg,reg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg))) +#define ARM_FMRX(p,reg,freg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(freg),(reg))) + +#endif /* __MONO_ARM_VFP_CODEGEN_H__ */ + diff --git a/arm/arm_vfpmacros.h b/arm/arm_vfpmacros.h new file mode 100644 index 0000000..f502645 --- /dev/null +++ b/arm/arm_vfpmacros.h @@ -0,0 +1,299 @@ +/* Macros for VFP ops, auto-generated from template */ + + +/* dyadic */ + +/* -- ADD -- */ + + +/* Fd := Fn ADD Fm */ +#define ARM_VFP_ADDD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_ADD,rd,rn,rm)) +#define ARM_VFP_ADDD(p, rd, rn, rm) \ + ARM_VFP_ADDD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_ADDS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_ADD,rd,rn,rm)) +#define ARM_VFP_ADDS(p, rd, rn, rm) \ + ARM_VFP_ADDS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- SUB -- 
*/ + + +/* Fd := Fn SUB Fm */ +#define ARM_VFP_SUBD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_SUB,rd,rn,rm)) +#define ARM_VFP_SUBD(p, rd, rn, rm) \ + ARM_VFP_SUBD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_SUBS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_SUB,rd,rn,rm)) +#define ARM_VFP_SUBS(p, rd, rn, rm) \ + ARM_VFP_SUBS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- MUL -- */ + + +/* Fd := Fn MUL Fm */ +#define ARM_VFP_MULD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_MUL,rd,rn,rm)) +#define ARM_VFP_MULD(p, rd, rn, rm) \ + ARM_VFP_MULD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_MULS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_MUL,rd,rn,rm)) +#define ARM_VFP_MULS(p, rd, rn, rm) \ + ARM_VFP_MULS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- NMUL -- */ + + +/* Fd := Fn NMUL Fm */ +#define ARM_VFP_NMULD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_NMUL,rd,rn,rm)) +#define ARM_VFP_NMULD(p, rd, rn, rm) \ + ARM_VFP_NMULD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_NMULS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_NMUL,rd,rn,rm)) +#define ARM_VFP_NMULS(p, rd, rn, rm) \ + ARM_VFP_NMULS_COND(p, rd, rn, rm, ARMCOND_AL) + + +/* -- DIV -- */ + + +/* Fd := Fn DIV Fm */ +#define ARM_VFP_DIVD_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_DIV,rd,rn,rm)) +#define ARM_VFP_DIVD(p, rd, rn, rm) \ + ARM_VFP_DIVD_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_DIVS_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_DIV,rd,rn,rm)) +#define ARM_VFP_DIVS(p, rd, rn, rm) \ + ARM_VFP_DIVS_COND(p, rd, rn, rm, ARMCOND_AL) + + + +/* monadic */ + +/* -- CPY -- */ + + +/* Fd := CPY Fm */ + +#define ARM_CPYD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CPY,(dreg),(sreg))) +#define ARM_CPYD(p,dreg,sreg) ARM_CPYD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CPYS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CPY,(dreg),(sreg))) +#define ARM_CPYS(p,dreg,sreg) ARM_CPYS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- ABS -- */ + + +/* Fd := ABS Fm */ + +#define ARM_ABSD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_ABS,(dreg),(sreg))) +#define ARM_ABSD(p,dreg,sreg) ARM_ABSD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_ABSS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_ABS,(dreg),(sreg))) +#define ARM_ABSS(p,dreg,sreg) ARM_ABSS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- NEG -- */ + + +/* Fd := NEG Fm */ + +#define ARM_NEGD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_NEG,(dreg),(sreg))) +#define ARM_NEGD(p,dreg,sreg) ARM_NEGD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_NEGS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_NEG,(dreg),(sreg))) +#define ARM_NEGS(p,dreg,sreg) ARM_NEGS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- SQRT -- */ + + +/* Fd := SQRT Fm */ + +#define ARM_SQRTD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_SQRT,(dreg),(sreg))) +#define 
ARM_SQRTD(p,dreg,sreg) ARM_SQRTD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_SQRTS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_SQRT,(dreg),(sreg))) +#define ARM_SQRTS(p,dreg,sreg) ARM_SQRTS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- CMP -- */ + + +/* Fd := CMP Fm */ + +#define ARM_CMPD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMP,(dreg),(sreg))) +#define ARM_CMPD(p,dreg,sreg) ARM_CMPD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CMPS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMP,(dreg),(sreg))) +#define ARM_CMPS(p,dreg,sreg) ARM_CMPS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- CMPE -- */ + + +/* Fd := CMPE Fm */ + +#define ARM_CMPED_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPE,(dreg),(sreg))) +#define ARM_CMPED(p,dreg,sreg) ARM_CMPED_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CMPES_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPE,(dreg),(sreg))) +#define ARM_CMPES(p,dreg,sreg) ARM_CMPES_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- CMPZ -- */ + + +/* Fd := CMPZ Fm */ + +#define ARM_CMPZD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPZ,(dreg),(sreg))) +#define ARM_CMPZD(p,dreg,sreg) ARM_CMPZD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CMPZS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPZ,(dreg),(sreg))) +#define ARM_CMPZS(p,dreg,sreg) ARM_CMPZS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- CMPEZ -- */ + + +/* Fd := CMPEZ Fm */ + +#define ARM_CMPEZD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPEZ,(dreg),(sreg))) +#define ARM_CMPEZD(p,dreg,sreg) ARM_CMPEZD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CMPEZS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPEZ,(dreg),(sreg))) +#define ARM_CMPEZS(p,dreg,sreg) ARM_CMPEZS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- CVT -- */ + + +/* Fd := CVT Fm */ + +#define ARM_CVTD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CVT,(dreg),(sreg))) +#define ARM_CVTD(p,dreg,sreg) ARM_CVTD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_CVTS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CVT,(dreg),(sreg))) +#define ARM_CVTS(p,dreg,sreg) ARM_CVTS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- UITO -- */ + + +/* Fd := UITO Fm */ + +#define ARM_UITOD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_UITO,(dreg),(sreg))) +#define ARM_UITOD(p,dreg,sreg) ARM_UITOD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_UITOS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_UITO,(dreg),(sreg))) +#define ARM_UITOS(p,dreg,sreg) ARM_UITOS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- SITO -- */ + + +/* Fd := SITO Fm */ + +#define ARM_SITOD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_SITO,(dreg),(sreg))) +#define ARM_SITOD(p,dreg,sreg) ARM_SITOD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_SITOS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_SITO,(dreg),(sreg))) +#define ARM_SITOS(p,dreg,sreg) ARM_SITOS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* 
-- TOUI -- */ + + +/* Fd := TOUI Fm */ + +#define ARM_TOUID_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOUI,(dreg),(sreg))) +#define ARM_TOUID(p,dreg,sreg) ARM_TOUID_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_TOUIS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOUI,(dreg),(sreg))) +#define ARM_TOUIS(p,dreg,sreg) ARM_TOUIS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- TOSI -- */ + + +/* Fd := TOSI Fm */ + +#define ARM_TOSID_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOSI,(dreg),(sreg))) +#define ARM_TOSID(p,dreg,sreg) ARM_TOSID_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_TOSIS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOSI,(dreg),(sreg))) +#define ARM_TOSIS(p,dreg,sreg) ARM_TOSIS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- TOUIZ -- */ + + +/* Fd := TOUIZ Fm */ + +#define ARM_TOUIZD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOUIZ,(dreg),(sreg))) +#define ARM_TOUIZD(p,dreg,sreg) ARM_TOUIZD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_TOUIZS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOUIZ,(dreg),(sreg))) +#define ARM_TOUIZS(p,dreg,sreg) ARM_TOUIZS_COND(p,dreg,sreg,ARMCOND_AL) + + +/* -- TOSIZ -- */ + + +/* Fd := TOSIZ Fm */ + +#define ARM_TOSIZD_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOSIZ,(dreg),(sreg))) +#define ARM_TOSIZD(p,dreg,sreg) ARM_TOSIZD_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_TOSIZS_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOSIZ,(dreg),(sreg))) +#define ARM_TOSIZS(p,dreg,sreg) ARM_TOSIZS_COND(p,dreg,sreg,ARMCOND_AL) + + + + + + +/* end generated */ + diff --git a/arm/fpaops.sh b/arm/fpaops.sh index 416b894..fa6a280 100755 --- a/arm/fpaops.sh +++ b/arm/fpaops.sh @@ -19,6 +19,6 @@ gen "$DYADIC" fpa_macros echo -e "\n/* monadic */\n" gen "$MONADIC" fpam_macros -echo -e "\n\n" >> $OUTFILE +echo -e "\n\n" echo -e "\n/* end generated */\n" diff --git a/arm/vfp_macros.th b/arm/vfp_macros.th new file mode 100644 index 0000000..cca67dc --- /dev/null +++ b/arm/vfp_macros.th @@ -0,0 +1,15 @@ +/* -- -- */ + + +/* Fd := Fn Fm */ +#define ARM_VFP_D_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_,rd,rn,rm)) +#define ARM_VFP_D(p, rd, rn, rm) \ + ARM_VFP_D_COND(p, rd, rn, rm, ARMCOND_AL) + +#define ARM_VFP_S_COND(p, rd, rn, rm, cond) \ + ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_,rd,rn,rm)) +#define ARM_VFP_S(p, rd, rn, rm) \ + ARM_VFP_S_COND(p, rd, rn, rm, ARMCOND_AL) + + diff --git a/arm/vfpm_macros.th b/arm/vfpm_macros.th new file mode 100644 index 0000000..25ad721 --- /dev/null +++ b/arm/vfpm_macros.th @@ -0,0 +1,14 @@ +/* -- -- */ + + +/* Fd := Fm */ + +#define ARM_D_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_,(dreg),(sreg))) +#define ARM_D(p,dreg,sreg) ARM_D_COND(p,dreg,sreg,ARMCOND_AL) + +#define ARM_S_COND(p,dreg,sreg,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_,(dreg),(sreg))) +#define ARM_S(p,dreg,sreg) ARM_S_COND(p,dreg,sreg,ARMCOND_AL) + + diff --git a/arm/vfpops.sh b/arm/vfpops.sh new file mode 100755 index 0000000..4f850f0 --- /dev/null +++ b/arm/vfpops.sh @@ -0,0 +1,24 @@ +#!/bin/bash + 
+DYADIC="ADD SUB MUL NMUL DIV" +MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ" + +# $1: opcode list +# $2: template +function gen() { + for i in $1; do + sed "s//$i/g" $2.th + done +} + +echo -e "/* Macros for VFP ops, auto-generated from template */\n" + +echo -e "\n/* dyadic */\n" +gen "$DYADIC" vfp_macros + +echo -e "\n/* monadic */\n" +gen "$MONADIC" vfpm_macros + +echo -e "\n\n" + +echo -e "\n/* end generated */\n" -- cgit v1.1 From 538fd0794b9ef24f7c765891ed682fc947cf8e85 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 12 Sep 2006 13:02:59 +0000 Subject: 2006-09-12 Zoltan Varga * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov . svn path=/trunk/mono/; revision=65305 --- ChangeLog | 3 +++ alpha/alpha-codegen.h | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index ab85855..70d8c47 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2006-09-12 Zoltan Varga + + * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov . Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index 194c327..4bdf329 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -292,7 +292,7 @@ typedef enum { /* pal calls */ /* #define alpha_halt( ins ) alpha_encode_palcall( ins, 0, 0 ) */ -#define alpha_call_pal( ins, func ) alpha_encode_palcall( ins, 0, x ) +#define alpha_call_pal( ins, func ) alpha_encode_palcall( ins, 0, func ) /*memory*/ #define alpha_lda( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x08, Rdest, Rsrc, offset ) @@ -538,11 +538,17 @@ typedef enum { #define alpha_subt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A1, Rsrc1, Rsrc2, Rdest ) #define alpha_mult(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A2, Rsrc1, Rsrc2, Rdest ) #define alpha_divt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A3, Rsrc1, Rsrc2, Rdest ) + #define alpha_cmptun(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A4, Rsrc1, Rsrc2, Rdest ) #define alpha_cmpteq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A5, Rsrc1, Rsrc2, Rdest ) #define alpha_cmptlt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A6, Rsrc1, Rsrc2, Rdest ) #define alpha_cmptle(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A7, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptun_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A4, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmpteq_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A5, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptlt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A6, Rsrc1, Rsrc2, Rdest ) +#define alpha_cmptle_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A7, Rsrc1, Rsrc2, Rdest ) + #define alpha_cvtts(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AC, alpha_fzero, Rsrc2, Rdest ) #define alpha_cvttq(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AF, alpha_fzero, Rsrc2, Rdest ) #define alpha_cvtqs(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BC, alpha_fzero, Rsrc2, Rdest ) @@ -553,6 +559,7 @@ typedef enum { #define alpha_cpysn(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x021, Rsrc1, Rsrc2, Rdest ) #define alpha_cpyse(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x022, Rsrc1, Rsrc2, Rdest ) +#define alpha_trapb(ins) alpha_encode_op(ins, 0x18, 0, 0, 0, 0); #endif -- cgit v1.1 From f99322f3ea7b7be85ac63c87c664aafb7f5e17bf Mon Sep 17 00:00:00 2001 From: 
Miguel de Icaza Date: Wed, 11 Oct 2006 21:34:24 +0000 Subject: 2006-10-11 Sergey Tikhonov * atomic.h: Fix atomic decrement. * mini/cpu-alpha.md: Use native long shift insts * mono/mono/mini/tramp-alpha.c: Implemented mono_arch_patch_delegate_trampoline method * Started work on using global registers * Use byte/word memory load/store insts if cpu supports it * Code clean up svn path=/trunk/mono/; revision=66573 --- alpha/alpha-codegen.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index 4bdf329..ee809f5 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -516,6 +516,9 @@ typedef enum { #define alpha_mulq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, Rsrc2, Rdest ) #define alpha_mulq_(ins, Rsrc1, lit, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, lit, Rdest ) +#define alpha_sextb(ins, Rsrc2, Rdest) alpha_encode_op( ins, 0x1c, 0x00, alpha_zero, Rsrc2, Rdest ) +#define alpha_sextw(ins, Rsrc2, Rdest) alpha_encode_op( ins, 0x1c, 0x01, alpha_zero, Rsrc2, Rdest ) + // For 264 #define alpha_ftois( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x078, RFsrc, alpha_zero, Rdest ) #define alpha_ftoit( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) -- cgit v1.1 From 6f8d67005785ba86e81ac930325767d0b270a070 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Fri, 10 Nov 2006 18:42:10 +0000 Subject: Typo fixes. svn path=/trunk/mono/; revision=67683 --- arm/arm-vfp-codegen.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index e1f760b..fe15dd0 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -112,7 +112,7 @@ enum { #define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \ ((offset) >= 0? (offset)>>2: -(offset)>>2) | \ - ((6 << 25) | \ + (6 << 25) | \ ((cp) << 8) | \ (((Fd) >> 1) << 12) | \ (((Fd) & 1) << 22) | \ @@ -158,7 +158,6 @@ enum { #include "arm_vfpmacros.h" -#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) /* coprocessor register transfer */ #define ARM_FMSR(p,freg,reg) \ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg))) -- cgit v1.1 From b63503e7c4b5ebb8baafb5b58ec69395146db022 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 15 Nov 2006 16:00:09 +0000 Subject: Wed Nov 15 16:56:53 CET 2006 Paolo Molaro * mips/*: fixes by Mark E Mason . svn path=/trunk/mono/; revision=67929 --- ChangeLog | 5 +++ Makefile.am | 2 +- mips/Makefile.am | 8 ++++ mips/mips-codegen.h | 105 +++++++++++++++++++++++++++++++++++++++++----------- mips/test.c | 18 +++++++-- 5 files changed, 112 insertions(+), 26 deletions(-) create mode 100644 mips/Makefile.am diff --git a/ChangeLog b/ChangeLog index 70d8c47..11b6d86 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ + +Wed Nov 15 16:56:53 CET 2006 Paolo Molaro + + * mips/*: fixes by Mark E Mason . + 2006-09-12 Zoltan Varga * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov . 
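Returning to the VFP macros introduced (and just typo-fixed) above: ARM_DEF_VFP_DYADIC splits each single-precision register number across a four-bit field and a one-bit extension, which is why every operand appears as (reg >> 1) plus (reg & 1), and the double registers alias even-numbered singles (D0 = F0, D1 = F2, and so on). A standalone repack of the macro, checked against a known encoding; this is an illustrative sketch, not the header itself:

#include <stdio.h>

/* same bit layout as ARM_DEF_VFP_DYADIC: Fd in bits 12..15 plus bit 22,
 * Fn in 16..19 plus bit 7, Fm in 0..3 plus bit 5, coprocessor in 8..11,
 * condition in the top nibble */
static unsigned int vfp_dyadic (unsigned cond, unsigned cp, unsigned op,
                                unsigned fd, unsigned fn, unsigned fm)
{
	return (14u << 24) | (cp << 8) | op
		| ((fd >> 1) << 12) | ((fd & 1) << 22)
		| ((fn >> 1) << 16) | ((fn & 1) << 7)
		| ((fm >> 1) << 0)  | ((fm & 1) << 5)
		| (cond << 28);
}

int main (void)
{
	/* ARM_VFP_ADD is (1<<21)|(1<<20); faddd d0, d1, d2 (F0, F2, F4 in
	 * the shared register file) should come out as 0xee310b02 */
	printf ("%08x\n", vfp_dyadic (0xe, 11, (1u << 21) | (1u << 20), 0, 2, 4));
	return 0;
}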
diff --git a/Makefile.am b/Makefile.am index 70b24b9..31c55c2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 +DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 mips INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/mips/Makefile.am b/mips/Makefile.am new file mode 100644 index 0000000..c272d04 --- /dev/null +++ b/mips/Makefile.am @@ -0,0 +1,8 @@ + +INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) + +noinst_LTLIBRARIES = libmonoarch-mips.la + +libmonoarch_mips_la_SOURCES = mips-codegen.h + +noinst_PROGRAMS = test diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h index 8cc0cd0..adb3200 100644 --- a/mips/mips-codegen.h +++ b/mips/mips-codegen.h @@ -33,7 +33,7 @@ enum { mips_s6, mips_s7, mips_t8, /* 24 temps */ - mips_t9, + mips_t9, /* 25 temp / pic call-through register */ mips_k0, /* 26 kernel-reserved */ mips_k1, mips_gp, /* 28 */ @@ -45,29 +45,39 @@ enum { /* we treat the register file as containing just doubles... */ enum { mips_f0, /* return regs */ + mips_f1, mips_f2, + mips_f3, mips_f4, /* temps */ + mips_f5, mips_f6, + mips_f7, mips_f8, + mips_f9, mips_f10, + mips_f11, mips_f12, /* first arg */ + mips_f13, mips_f14, /* second arg */ + mips_f15, mips_f16, /* temps */ + mips_f17, mips_f18, + mips_f19, mips_f20, /* callee saved */ + mips_f21, mips_f22, + mips_f23, mips_f24, + mips_f25, mips_f26, + mips_f27, mips_f28, - mips_f30 + mips_f29, + mips_f30, + mips_f31 }; -#define mips_emit32(c,x) do { *((unsigned int *) c) = x; ((unsigned int *)c)++;} while (0) -#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|(imm))) -#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|(imm))) -#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func))) -#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun))) - /* prefetch hints */ enum { MIPS_FOR_LOAD, @@ -118,18 +128,69 @@ enum { MIPS_FPU_CAUSES_OFFSET = 12 }; -/* fpu condition values */ +/* fpu condition values - see manual entry for C.cond.fmt instructions */ enum { - MIPS_FPU_FALSE, /* TRUE */ - MIPS_FPU_UNORDERED, /* ORDERED */ - MIPS_FPU_EQ, /* NOT_EQUAL */ - MIPS_FPU_UNORD_EQ, /* ORDERED or NEQ */ - MIPS_FPU_ORD_LT, /* UNORDERED or GE */ - MIPS_FPU_UNORD_LT, /* ORDERED or GE */ - MIPS_FPU_ORD_LE, /* UNORDERED or GT */ - MIPS_FPU_UNORD_LE /* OREDERED or GT */ + MIPS_FPU_F, + MIPS_FPU_UN, + MIPS_FPU_EQ, + MIPS_FPU_UEQ, + MIPS_FPU_OLT, + MIPS_FPU_ULT, + MIPS_FPU_OLE, + MIPS_FPU_ULE, + MIPS_FPU_SF, + MIPS_FPU_NGLE, + MIPS_FPU_SEQ, + MIPS_FPU_NGL, + MIPS_FPU_LT, + MIPS_FPU_NGE, + MIPS_FPU_LE, + MIPS_FPU_NGT }; +#define mips_emit32(c,x) do { \ + *((guint32 *) (void *)(c)) = x; \ + (c) = (typeof(c))(((guint32 *)(void *)(c)) + 1); \ + } while (0) + +#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((imm)&0xffff))) +#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|((imm)&0x03ffffff))) +#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func))) +#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun))) + +#define mips_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) + +/* Load always using lui/addiu pair (for later patching) */ +#define 
mips_load(c,D,v) do { \ + if (!mips_is_imm16 ((v))) { \ + if (((guint32)(v)) & (1 << 15)) { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \ + } \ + else { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \ + } \ + mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \ + } \ + else \ + mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \ + } while (0) + +/* load constant - no patch-up */ +#define mips_load_const(c,D,v) do { \ + if (!mips_is_imm16 ((v))) { \ + if (((guint32)(v)) & (1 << 15)) { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \ + } \ + else { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \ + } \ + if (((guint32)(v)) & 0xffff) \ + mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \ + } \ + else \ + mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \ + } while (0) + /* arithmetric ops */ #define mips_add(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,32) #define mips_addi(c,dest,src1,imm) mips_format_i(c,8,src1,dest,imm) @@ -141,6 +202,7 @@ enum { #define mips_daddiu(c,dest,src1,imm) mips_format_i(c,25,src1,dest,imm) #define mips_dsub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,46) #define mips_dsubu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,47) +#define mips_mul(c,dest,src1,src2) mips_format_r(c,28,src1,src2,dest,0,2) #define mips_sub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,34) #define mips_subu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,35) @@ -226,6 +288,7 @@ enum { #define mips_lwl(c,dest,base,offset) mips_format_i(c,34,base,dest,offset) #define mips_lwr(c,dest,base,offset) mips_format_i(c,38,base,dest,offset) #define mips_lwu(c,dest,base,offset) mips_format_i(c,39,base,dest,offset) + #define mips_sb(c,src,base,offset) mips_format_i(c,40,base,src,offset) #define mips_sc(c,src,base,offset) mips_format_i(c,56,base,src,offset) #define mips_scd(c,src,base,offset) mips_format_i(c,60,base,src,offset) @@ -323,10 +386,10 @@ enum { /* fp moves, loads */ #define mips_fmovs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,6) #define mips_fmovd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,6) -#define mips_wmovfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0) -#define mips_wmovtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0) -#define mips_dmovfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0,0) -#define mips_dmovtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0,0) +#define mips_mfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0) +#define mips_mtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0) +#define mips_dmfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0) +#define mips_dmtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0) #define mips_ldc1(c,dest,base,offset) mips_ldc(c,1,dest,base,offset) #define mips_ldxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,1) #define mips_lwc1(c,dest,base,offset) mips_lwc(c,1,dest,base,offset) diff --git a/mips/test.c b/mips/test.c index d83f833..4f5e1ad 100644 --- a/mips/test.c +++ b/mips/test.c @@ -1,10 +1,20 @@ -#include "mips-codegen.h" +#include "config.h" #include +#include + +#define NO_MIPS_JIT_DEBUG + +#include "mips-codegen.h" +#include "mono/metadata/class.h" + +/* don't run the resulting program, it will destroy your computer, + * just objdump -d it to inspect we generated the correct assembler. 
+ */ -int main () { - unsigned int *code, * p; +int main (int argc, char *argv[]) { + guint32 *code, * p; - code = p = malloc (sizeof (int) * 1024); + code = p = (guint32 *) malloc (sizeof (guint32) * 1024); mips_add (p, 3, 4, 5); mips_addi (p, 3, 4, 5); -- cgit v1.1 From edd2746e20c982e094abfd547afad74d8e7d2302 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 20 Nov 2006 16:37:26 +0000 Subject: Mon Nov 20 17:36:45 CET 2006 Paolo Molaro * arm/arm-codegen.h: added suppot for thumb interworking instructions. svn path=/trunk/mono/; revision=68201 --- ChangeLog | 4 ++++ arm/arm-codegen.h | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index 11b6d86..6aa593a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Mon Nov 20 17:36:45 CET 2006 Paolo Molaro + + * arm/arm-codegen.h: added suppot for thumb interworking instructions. + Wed Nov 15 16:56:53 CET 2006 Paolo Molaro * mips/*: fixes by Mark E Mason . diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 5de57b2..41c4a93 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -259,7 +259,13 @@ typedef struct { #define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond)) #define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs)) +#define ARM_DEF_BX(reg,sub,cond) (0x12fff << 8 | (reg) | ((sub) << 4) | ((cond) << ARMCOND_SHIFT)) +#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 1, cond)) +#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg)) + +#define ARM_BLX_REG_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 3, cond)) +#define ARM_BLX_REG(p, reg) ARM_BLX_REG_COND((p), ARMCOND_AL, (reg)) /* Data Processing Instructions - there are 3 types. */ -- cgit v1.1 From 8e25ae408b9d1836130807d3f465023347051332 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Fri, 22 Dec 2006 22:51:15 +0000 Subject: Patch from Sergey Tikhonov Mono on Alpha updates: - Code cleanup - Some hacks to support debugger - updates for "linears" optimization svn path=/trunk/mono/; revision=69976 --- alpha/alpha-codegen.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index ee809f5..5eee20a 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -562,7 +562,8 @@ typedef enum { #define alpha_cpysn(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x021, Rsrc1, Rsrc2, Rdest ) #define alpha_cpyse(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x022, Rsrc1, Rsrc2, Rdest ) -#define alpha_trapb(ins) alpha_encode_op(ins, 0x18, 0, 0, 0, 0); +#define alpha_trapb(ins) alpha_encode_mem_fc( ins, 0x18, 0x0000, 0, 0, 0 ) +#define alpha_mb(ins) alpha_encode_mem_fc( ins, 0x18, 0x4000, 0, 0, 0 ) #endif -- cgit v1.1 From 0251f000fba5c8f99bec6c33beae0c2aabe66451 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 23 Jan 2007 17:11:29 +0000 Subject: * s390x-codegen.h: Add packed attribute to several instruction structures. svn path=/trunk/mono/; revision=71523 --- s390x/ChangeLog | 4 ++++ s390x/s390x-codegen.h | 14 +++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index e05279d..0474263 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,3 +1,7 @@ +2007-01-23 Neale Ferguson + + * s390x-codegen.h: Add packed attribute to several instruction structures. + 2006-03-13 Neale Ferguson * s390x-codegen.h: Fix immediate checks. 
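Two of the additions above reward a closer look. First, mips_load: addiu sign-extends its 16-bit immediate, so whenever bit 15 of the target constant is set, the lui half must be biased by one to compensate. A quick emulation of the lui/addiu expansion:

#include <stdio.h>

/* emulate the lui/addiu pair: addiu sign-extends its immediate, so a
 * set bit 15 forces a +1 bias on the lui value */
static unsigned int expand (unsigned int v)
{
	unsigned int hi = (v >> 16) + ((v & 0x8000) ? 1 : 0);  /* lui */
	int lo = (short) (v & 0xffff);                         /* addiu */
	return (hi << 16) + lo;
}

int main (void)
{
	unsigned int v = 0xdeadbeef;   /* bit 15 set: 0xbeef goes negative */
	printf ("%08x -> %08x\n", v, expand (v));   /* round-trips */
	return 0;
}

Second, the Thumb-interworking branches: bx and blx share the 0x12fff stem and differ only in the "sub" opcode nibble (1 for bx, 3 for blx). A standalone repack of ARM_DEF_BX against the well-known encodings:

#include <stdio.h>

static unsigned int def_bx (unsigned reg, unsigned sub, unsigned cond)
{
	return (0x12fffu << 8) | reg | (sub << 4) | (cond << 28);
}

int main (void)
{
	printf ("bx  r0 = %08x\n", def_bx (0, 1, 0xe));   /* e12fff10 */
	printf ("blx r0 = %08x\n", def_bx (0, 3, 0xe));   /* e12fff30 */
	return 0;
}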
diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 9ef7475..6ae7058 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -272,7 +272,7 @@ typedef struct { char b2 : 4; int d2 : 20; char op2; -} RXY_Format __attribute__ ((packed)); +} __attribute__ ((packed)) RXY_Format; typedef struct { char op; @@ -305,7 +305,7 @@ typedef struct { char b2 : 4; int d2 : 20; char op2; -} RSY_Format_1 __attribute__ ((packed)); +} __attribute__ ((packed)) RSY_Format_1; typedef struct { char op1; @@ -314,7 +314,7 @@ typedef struct { char b2 : 4; int d2 : 20; char op2; -} RSY_Format_2 __attribute__ ((packed)); +} __attribute__ ((packed)) RSY_Format_2; typedef struct { char op1; @@ -354,14 +354,14 @@ typedef struct { char r1 : 4; char op2 : 4; int i2; -} RIL_Format_1 __attribute__ ((packed)); +} __attribute__ ((packed)) RIL_Format_1; typedef struct { char op1; char m1 : 4; char op2 : 4; int i2; -} RIL_Format_2 __attribute__ ((packed)); +} __attribute__ ((packed)) RIL_Format_2; typedef struct { char op; @@ -376,7 +376,7 @@ typedef struct { char b1 : 4; int d1 : 20; char op2; -} SIY_Format __attribute__ ((packed)); +} __attribute__ ((packed)) SIY_Format; typedef struct { short op; @@ -429,7 +429,7 @@ typedef struct { short d1 : 12; char b2 : 4; short d2 : 12; -} SSE_Format __attribute__ ((packed)); +} __attribute__ ((packed)) SSE_Format; #define s390_emit16(c, x) do \ { \ -- cgit v1.1 From 0ba3e4bdd057c7a0d25767f7647a00f07683b44c Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 24 Jan 2007 20:01:27 +0000 Subject: Wed Jan 24 21:00:40 CET 2007 Paolo Molaro * arm/arm-codegen.h: fixed encoding of short/byte load/store instructions with negative immediate offsets. svn path=/trunk/mono/; revision=71622 --- ChangeLog | 5 +++++ arm/arm-codegen.h | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 6aa593a..b6ae4d7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Wed Jan 24 21:00:40 CET 2007 Paolo Molaro + + * arm/arm-codegen.h: fixed encoding of short/byte load/store + instructions with negative immediate offsets. + Mon Nov 20 17:36:45 CET 2006 Paolo Molaro * arm/arm-codegen.h: added suppot for thumb interworking instructions. diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 41c4a93..d8c293a 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -737,10 +737,10 @@ typedef struct { #define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4)) #define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \ - ((imm) & 0xF) | \ + ((imm) < 0?(-(imm)) & 0xF:(imm) & 0xF) | \ ((h) << 5) | \ ((s) << 6) | \ - (((imm) << 4) & (0xF << 8)) | \ + ((imm) < 0?((-(imm)) << 4) & 0xF00:((imm) << 4) & 0xF00) | \ ((rd) << 12) | \ ((rn) << 16) | \ ((ls) << 20) | \ -- cgit v1.1 From b7fd657ee94257eeec946fa9eb11b3f60e7e33e6 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Mon, 12 Mar 2007 16:07:56 +0000 Subject: Mon Mar 12 17:07:32 CET 2007 Paolo Molaro * amd64/amd64-codegen.h: removed some useless size rex prefixes. svn path=/trunk/mono/; revision=74128 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 26 +++++++++++++------------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/ChangeLog b/ChangeLog index b6ae4d7..6d3637e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ +Mon Mar 12 17:07:32 CET 2007 Paolo Molaro + + * amd64/amd64-codegen.h: removed some useless size rex prefixes. 
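The halfword/byte transfer fix above matters because the immediate form of those ARM loads and stores keeps an unsigned 8-bit offset magnitude split across two nibbles (bits 3..0 and 11..8); the direction of a negative offset is carried by a separate up/down bit elsewhere in the instruction word, so the macro must encode the magnitude, not the two's-complement value. A sketch of the corrected split:

#include <stdio.h>

/* split an 8-bit offset magnitude across the two immediate nibbles of
 * an ARM halfword transfer; negative offsets contribute only their
 * magnitude here, with the sign handled by the instruction's U bit */
static unsigned int hxfer_imm (int imm)
{
	unsigned int mag = imm < 0 ? -imm : imm;
	return (mag & 0xf) | ((mag << 4) & 0xf00);
}

int main (void)
{
	printf ("%03x\n", hxfer_imm (0xab));    /* a0b */
	printf ("%03x\n", hxfer_imm (-0xab));   /* a0b as well: same magnitude */
	return 0;
}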
+ Wed Jan 24 21:00:40 CET 2007 Paolo Molaro * arm/arm-codegen.h: fixed encoding of short/byte load/store diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index b734932..e431b05 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -524,7 +524,7 @@ typedef union { /* Generated from x86-codegen.h */ -#define amd64_breakpoint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_breakpoint(inst); } while (0) +#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) #define amd64_cld_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); } while (0) #define amd64_stosb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); } while (0) #define amd64_stosl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); } while (0) @@ -692,18 +692,18 @@ typedef union { #define amd64_enter_size(inst,framesize) do { amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); } while (0) //#define amd64_leave_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); } while (0) #define amd64_sahf_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); } while (0) -#define amd64_fsin_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fsin(inst); } while (0) -#define amd64_fcos_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fcos(inst); } while (0) -#define amd64_fabs_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fabs(inst); } while (0) -#define amd64_ftst_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ftst(inst); } while (0) -#define amd64_fxam_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fxam(inst); } while (0) -#define amd64_fpatan_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fpatan(inst); } while (0) -#define amd64_fprem_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fprem(inst); } while (0) -#define amd64_fprem1_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fprem1(inst); } while (0) -#define amd64_frndint_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_frndint(inst); } while (0) -#define amd64_fsqrt_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fsqrt(inst); } while (0) -#define amd64_fptan_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fptan(inst); } while (0) -//#define amd64_padding_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_padding((inst),(size)); } while (0) +#define amd64_fsin_size(inst,size) do { x86_fsin(inst); } while (0) +#define amd64_fcos_size(inst,size) do { x86_fcos(inst); } while (0) +#define amd64_fabs_size(inst,size) do { x86_fabs(inst); } while (0) +#define amd64_ftst_size(inst,size) do { x86_ftst(inst); } while (0) +#define amd64_fxam_size(inst,size) do { x86_fxam(inst); } while (0) +#define amd64_fpatan_size(inst,size) do { x86_fpatan(inst); } while (0) +#define amd64_fprem_size(inst,size) do { x86_fprem(inst); } while (0) +#define amd64_fprem1_size(inst,size) do { x86_fprem1(inst); } while (0) +#define amd64_frndint_size(inst,size) do { x86_frndint(inst); } while (0) +#define amd64_fsqrt_size(inst,size) do { x86_fsqrt(inst); } while (0) +#define amd64_fptan_size(inst,size) do { x86_fptan(inst); } while (0) +//#define amd64_padding_size(inst,size) do { x86_padding((inst),(size)); } while (0) #define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); } while 
(0) #define amd64_epilog_size(inst,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); } while (0) #define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); } while (0) -- cgit v1.1 From 9159abc7ec906d64a15eee8e02b9e5b3f2cce87d Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Thu, 12 Apr 2007 20:45:34 +0000 Subject: * tramp.c: Add MONO_TYPE_PTR case. svn path=/trunk/mono/; revision=75663 --- s390x/ChangeLog | 4 ++++ s390x/tramp.c | 1 + 2 files changed, 5 insertions(+) diff --git a/s390x/ChangeLog b/s390x/ChangeLog index 0474263..e53ab6e 100644 --- a/s390x/ChangeLog +++ b/s390x/ChangeLog @@ -1,3 +1,7 @@ +2007-04-12 Neale Ferguson + + * tramp.c: Add MONO_TYPE_PTR case. + 2007-01-23 Neale Ferguson * s390x-codegen.h: Add packed attribute to several instruction structures. diff --git a/s390x/tramp.c b/s390x/tramp.c index 43306fb..fe9f310 100644 --- a/s390x/tramp.c +++ b/s390x/tramp.c @@ -151,6 +151,7 @@ enum_retvalue: case MONO_TYPE_OBJECT: case MONO_TYPE_R4: case MONO_TYPE_R8: + case MONO_TYPE_PTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_STRING: -- cgit v1.1 From 5ca5ea86f1ff85953c28e0ba3b657268cd2cdfba Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Sun, 15 Apr 2007 09:11:00 +0000 Subject: * tramp.c: Add MONO_TYPE_PTR case. * mini-s390.c: Correct checking for enum type in return value processing. svn path=/trunk/mono/; revision=75718 --- s390/ChangeLog | 4 ++++ s390/tramp.c | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/s390/ChangeLog b/s390/ChangeLog index 1e64bd7..9b41109 100644 --- a/s390/ChangeLog +++ b/s390/ChangeLog @@ -1,3 +1,7 @@ +2007-04-12 Neale Ferguson + + * tramp.c: Add MONO_TYPE_PTR case. + 2005-12-13 Neale Ferguson * s390-codegen.h: Add some new instructions (conditional jumps) diff --git a/s390/tramp.c b/s390/tramp.c index 5499161..475a4bf 100644 --- a/s390/tramp.c +++ b/s390/tramp.c @@ -153,6 +153,7 @@ enum_retvalue: case MONO_TYPE_R8: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: + case MONO_TYPE_PTR: case MONO_TYPE_STRING: sz->code_size += 4; break; @@ -192,7 +193,7 @@ enum_retvalue: case MONO_TYPE_VOID: break; default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); + g_error ("tramp: cannot handle as return value 0x%x", sig->ret->type); } } -- cgit v1.1 From a024b2405701bbee2003e46a0f9b0e2c0486033c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 23 Apr 2007 11:31:33 +0000 Subject: 2007-04-23 Zoltan Varga * alpha/alpha-codegen.h: More alpha port work from Sergey Tikhonov . svn path=/trunk/mono/; revision=76103 --- ChangeLog | 4 ++++ alpha/alpha-codegen.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index 6d3637e..4ed77c2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2007-04-23 Zoltan Varga + + * alpha/alpha-codegen.h: More alpha port work from + Sergey Tikhonov . 
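The rex cleanup in the amd64 commit above drops prefixes that have no effect: REX.W is meaningless for x87 opcodes and for int3. A small sketch of the resulting encodings, assuming the usual code-buffer setup:

    guint8 buf [16];
    guint8 *code = buf;
    amd64_fsin_size (code, 8);       /* x87 fsin: just 0xD9 0xFE now */
    amd64_breakpoint_size (code, 8); /* int3: the single byte 0xCC */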
Mon Mar 12 17:07:32 CET 2007 Paolo Molaro diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h index 5eee20a..46f95e1 100644 --- a/alpha/alpha-codegen.h +++ b/alpha/alpha-codegen.h @@ -547,6 +547,10 @@ typedef enum { #define alpha_cmptlt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A6, Rsrc1, Rsrc2, Rdest ) #define alpha_cmptle(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A7, Rsrc1, Rsrc2, Rdest ) +#define alpha_addt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A0, Rsrc1, Rsrc2, Rdest ) +#define alpha_subt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A1, Rsrc1, Rsrc2, Rdest ) + + #define alpha_cmptun_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A4, Rsrc1, Rsrc2, Rdest ) #define alpha_cmpteq_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A5, Rsrc1, Rsrc2, Rdest ) #define alpha_cmptlt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A6, Rsrc1, Rsrc2, Rdest ) @@ -557,6 +561,9 @@ typedef enum { #define alpha_cvtqs(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BC, alpha_fzero, Rsrc2, Rdest ) #define alpha_cvtqt(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BE, alpha_fzero, Rsrc2, Rdest ) +#define alpha_divt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A3, Rsrc1, Rsrc2, Rdest ) + +#define alpha_cvtts_su(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5AC, alpha_fzero, Rsrc2, Rdest ) #define alpha_cpys(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x020, Rsrc1, Rsrc2, Rdest ) #define alpha_cpysn(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x021, Rsrc1, Rsrc2, Rdest ) -- cgit v1.1 From 26169bb71cd30b373975373952fb11d7a26b0cca Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 20 May 2007 19:41:51 +0000 Subject: 2007-05-20 Zoltan Varga * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed. svn path=/trunk/mono/; revision=77730 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 4ed77c2..677dcba 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2007-05-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed. + 2007-04-23 Zoltan Varga * alpha/alpha-codegen.h: More alpha port work from diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index e431b05..5648a1c 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -341,7 +341,7 @@ typedef union { #define amd64_call_reg(inst,reg) \ do { \ - amd64_emit_rex(inst, 8, 0, 0, (reg)); \ + amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) @@ -500,6 +500,8 @@ typedef union { #define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) +#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) + #define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) #define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) -- cgit v1.1 From e971b6ec5cf03043dc227759fced05d5786964d4 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 13 Jun 2007 17:41:53 +0000 Subject: 2007-06-13 Randolph Chung * hppa/hppa-codegen.h: Update with more instructions. 
* hppa/tramp.c: Disable for linux since we don't support the interpreter. svn path=/trunk/mono/; revision=79463 --- ChangeLog | 7 + hppa/hppa-codegen.h | 738 ++++++++++++++++++++++++++++++++++++++++++++-------- hppa/tramp.c | 3 + 3 files changed, 641 insertions(+), 107 deletions(-) diff --git a/ChangeLog b/ChangeLog index 677dcba..8cdb3dd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,10 @@ + +2007-06-13 Randolph Chung + + * hppa/hppa-codegen.h: Update with more instructions. + * hppa/tramp.c: Disable for linux since we don't support the + interpreter. + 2007-05-20 Zoltan Varga * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed. diff --git a/hppa/hppa-codegen.h b/hppa/hppa-codegen.h index 0a9586f..c03a9ef 100644 --- a/hppa/hppa-codegen.h +++ b/hppa/hppa-codegen.h @@ -1,3 +1,6 @@ +#ifndef _HPPA_CODEGEN_H_ +#define _HPPA_CODEGEN_H_ + typedef enum { hppa_r0 = 0, hppa_r1, @@ -36,71 +39,598 @@ typedef enum { hppa_r31 } HPPAIntRegister; -#define hppa_nop(p); \ - do { \ - *(p) = 0x08000240; \ - p++; \ - } while (0) +typedef enum { + hppa_fr0, + hppa_fr1, + hppa_fr2, + hppa_fr3, + hppa_fr4, + hppa_fr5, + hppa_fr6, + hppa_fr7, + hppa_fr8, + hppa_fr9, + hppa_fr10, + hppa_fr11, + hppa_fr12, + hppa_fr13, + hppa_fr14, + hppa_fr15, + hppa_fr16, + hppa_fr17, + hppa_fr18, + hppa_fr19, + hppa_fr20, + hppa_fr21, + hppa_fr22, + hppa_fr23, + hppa_fr24, + hppa_fr25, + hppa_fr26, + hppa_fr27, + hppa_fr28, + hppa_fr29, + hppa_fr30, + hppa_fr31 +} HPPAFloatRegister; -#define hppa_ldb(p, disp, base, dest); \ - do { \ - int neg = (disp) < 0; \ - *(p) = (0x40000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ - p++; \ - } while (0) +#define hppa_opcode(op) ((op) << 26) +#define hppa_opcode_alu(op1, op2) (((op1) << 26) | ((op2) << 6)) +#define hppa_op_r1(r) ((r) << 21) +#define hppa_op_r2(r) ((r) << 16) +#define hppa_op_r3(r) (r) -#define hppa_stb(p, src, disp, base) \ - do { \ - int neg = (disp) < 0; \ - *(p) = (0x60000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ - p++; \ - } while (0) +/* imm5, imm11 and imm14 are encoded by putting the sign bit in the LSB */ +#define hppa_op_imm5(im5) ((((im5) & 0xf) << 1) | (((int)(im5)) < 0)) +#define hppa_op_imm11(im11) ((((im11) & 0x3ff) << 1) | (((int)(im11)) < 0)) +#define hppa_op_imm14(im14) ((((im14) & 0x1fff) << 1) | (((int)(im14)) < 0)) -#define hppa_ldh(p, disp, base, dest) \ - do { \ - int neg = (disp) < 0; \ - g_assert(((disp) & 1) == 0); \ - *(p) = (0x44000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ - p++; \ - } while (0) +/* HPPA uses "selectors" for some operations. 
The two we need are L% and R% */ +/* lsel: select left 21 bits */ +#define hppa_lsel(v) (((int)(v))>>11) +/* rsel: select right 11 bits */ +#define hppa_rsel(v) (((int)(v))&0x7ff) -#define hppa_sth(p, src, disp, base) \ - do { \ - int neg = (disp) < 0; \ - g_assert(((disp) & 1) == 0); \ - *(p) = (0x64000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ - p++; \ - } while (0) +/* imm12 is used by the conditional branch insns + * w1 (bits [2..12]) + * w (bit 0) + * value = assemble_12(w1,w) = cat(w,w1{10},w1{0..9}) + * (note PA bit numbering) + * + * if the original number is: + * abcdefghijkl + * + * 3 2 1 0 + * 10987654321098765432109876543210 + * cdefghijklb a + */ +static inline int hppa_op_imm12(int im12) +{ + unsigned int a = im12 < 0; + unsigned int b = (im12 >> 10) & 0x1; + unsigned int cdefghijkl = im12 & 0x3ff; -#define hppa_ldw(p, disp, base, dest) \ - do { \ - int neg = (disp) < 0; \ - g_assert(((disp) & 3) == 0); \ - *(p) = (0x48000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((dest) << 16) | neg); \ - p++; \ + return (cdefghijkl << 3) | (b << 2) | a; +} + +/* + * imm17 is used by the BL insn, which has + * w1 (bits [16..20]) + * w2 (bits [2..12]) + * w (bit 0) + * value = assemble_17(w1,w2,w) = cat(w,w1,w2{10},w2{0..9}) + * (note PA bit numbering) + * + * if the original number is: + * abcdefghijklmnopq + * + * 3 2 1 0 + * 10987654321098765432109876543210 + * bcdef hijklmnopqg a + */ +static inline int hppa_op_imm17(int im17) +{ + unsigned int a = im17 < 0; + unsigned int bcdef = (im17 >> 11) & 0x1f; + unsigned int g = (im17 >> 10) & 0x1; + unsigned int hijklmnopq = im17 & 0x3ff; + + return (bcdef << 16) | (hijklmnopq << 3) | (g << 2) | a; +} + +/* imm21 is used by addil and ldil + * + * value = assemble_21(x) = cat(x{20},x{9..19},x{5..6},x{0..4},x{7..8}) + * (note PA bit numbering) + * + * if the original number is: + * abcdefghijklmnopqrstu + * + * 3 2 1 0 + * 10987654321098765432109876543210 + * opqrsmntubcdefghijkla + */ +static inline int hppa_op_imm21(int im21) +{ + unsigned int a = im21 < 0; + unsigned int bcdefghijkl = (im21 >> 9) & 0x7ff; + unsigned int mn = (im21 >> 7) & 0x3; + unsigned int opqrs = (im21 >> 2) & 0x1f; + unsigned int tu = im21 & 0x3; + + return (opqrs << 16) | (mn << 14) | (tu << 12) | (bcdefghijkl << 1) | a; +} + +/* returns 1 if VAL can fit in BITS */ +static inline int hppa_check_bits(int val, int bits) +{ + /* positive offset */ + if (!(val & (1 << (bits - 1))) && (val >> bits) != 0) + return 0; + /* negative offset */ + if ((val & (1 << (bits - 1))) && ((val >> bits) != (-1 >>(bits+2)))) + return 0; + return 1; +} + +static inline void *hppa_emit(void *inp, unsigned int insn) +{ + unsigned int *code = inp; + *code = insn; + return ((char *)code) + 4; +} + +/* Table 5-3: Compare conditions */ +#define HPPA_CMP_COND_NEVER (0) +#define HPPA_CMP_COND_EQ (1) +#define HPPA_CMP_COND_SLT (2) +#define HPPA_CMP_COND_SLE (3) +#define HPPA_CMP_COND_ULT (4) +#define HPPA_CMP_COND_ULE (5) +#define HPPA_CMP_COND_OV (6) +#define HPPA_CMP_COND_ODD (7) + +/* Table 5-3: Subtraction conditions */ +#define HPPA_SUB_COND_NEVER ((0 << 1) | 0) +#define HPPA_SUB_COND_EQ ((1 << 1) | 0) +#define HPPA_SUB_COND_SLT ((2 << 1) | 0) +#define HPPA_SUB_COND_SLE ((3 << 1) | 0) +#define HPPA_SUB_COND_ULT ((4 << 1) | 0) +#define HPPA_SUB_COND_ULE ((5 << 1) | 0) +#define HPPA_SUB_COND_SV ((6 << 1) | 0) +#define HPPA_SUB_COND_OD ((7 << 1) | 0) +#define HPPA_SUB_COND_ALWAYS ((0 << 1) | 1) +#define HPPA_SUB_COND_NE ((1 << 1) | 1) +#define HPPA_SUB_COND_SGE ((2
<< 1) | 1) +#define HPPA_SUB_COND_SGT ((3 << 1) | 1) +#define HPPA_SUB_COND_UGE ((4 << 1) | 1) +#define HPPA_SUB_COND_UGT ((5 << 1) | 1) +#define HPPA_SUB_COND_NSV ((6 << 1) | 1) +#define HPPA_SUB_COND_EV ((7 << 1) | 1) + +/* Table 5-4: Addition conditions */ +#define HPPA_ADD_COND_NEVER ((0 << 1) | 0) +#define HPPA_ADD_COND_EQ ((1 << 1) | 0) +#define HPPA_ADD_COND_LT ((2 << 1) | 0) +#define HPPA_ADD_COND_LE ((3 << 1) | 0) +#define HPPA_ADD_COND_NUV ((4 << 1) | 0) +#define HPPA_ADD_COND_ZUV ((5 << 1) | 0) +#define HPPA_ADD_COND_SV ((6 << 1) | 0) +#define HPPA_ADD_COND_OD ((7 << 1) | 0) +#define HPPA_ADD_COND_ALWAYS ((0 << 1) | 1) +#define HPPA_ADD_COND_NE ((1 << 1) | 1) +#define HPPA_ADD_COND_GE ((2 << 1) | 1) +#define HPPA_ADD_COND_GT ((3 << 1) | 1) +#define HPPA_ADD_COND_UV ((4 << 1) | 1) +#define HPPA_ADD_COND_VNZ ((5 << 1) | 1) +#define HPPA_ADD_COND_NSV ((6 << 1) | 1) +#define HPPA_ADD_COND_EV ((7 << 1) | 1) + +/* Table 5-5: Logical instruction conditions */ +#define HPPA_LOGICAL_COND_NEVER ((0 << 1) | 0) +#define HPPA_LOGICAL_COND_ZERO ((1 << 1) | 0) +#define HPPA_LOGICAL_COND_MSB_SET ((2 << 1) | 0) +#define HPPA_LOGICAL_COND_MSB_SET_OR_ZERO ((3 << 1) | 0) +#define HPPA_LOGICAL_COND_LSB_SET ((7 << 1) | 0) +#define HPPA_LOGICAL_COND_ALWAYS ((0 << 1) | 1) +#define HPPA_LOGICAL_COND_NZ ((1 << 1) | 1) +#define HPPA_LOGICAL_COND_MSB_CLR ((2 << 1) | 1) +#define HPPA_LOGICAL_COND_MSB_CLR_AND_NZ ((3 << 1) | 1) +#define HPPA_LOGICAL_COND_LSB_CLR ((7 << 1) | 1) + +/* Table 5-6: Unit Conditions */ +#define HPPA_UNIT_COND_NEVER ((0 << 1) | 0) +#define HPPA_UNIT_COND_SBZ ((2 << 1) | 0) +#define HPPA_UNIT_COND_SHZ ((3 << 1) | 0) +#define HPPA_UNIT_COND_SDC ((4 << 1) | 0) +#define HPPA_UNIT_COND_SBC ((6 << 1) | 0) +#define HPPA_UNIT_COND_SHC ((7 << 1) | 0) +#define HPPA_UNIT_COND_ALWAYS ((0 << 1) | 1) +#define HPPA_UNIT_COND_NBZ ((2 << 1) | 1) +#define HPPA_UNIT_COND_NHZ ((3 << 1) | 1) +#define HPPA_UNIT_COND_NDC ((4 << 1) | 1) +#define HPPA_UNIT_COND_NBC ((6 << 1) | 1) +#define HPPA_UNIT_COND_NHC ((7 << 1) | 1) + +/* Table 5-7: Shift/Extract/Deposit Conditions */ +#define HPPA_BIT_COND_NEVER (0) +#define HPPA_BIT_COND_ZERO (1) +#define HPPA_BIT_COND_MSB_SET (2) +#define HPPA_BIT_COND_LSB_SET (3) +#define HPPA_BIT_COND_ALWAYS (4) +#define HPPA_BIT_COND_SOME_SET (5) +#define HPPA_BIT_COND_MSB_CLR (6) +#define HPPA_BIT_COND_LSB_CLR (7) + +#define hppa_mtsar(p, r) \ + p = hppa_emit (p, hppa_opcode(0x00) | hppa_op_r1(11) | hppa_op_r2(r) | (0xC2 << 5)) + +#define hppa_bl_full(p, n, target, t) do { \ + g_assert (hppa_check_bits (target, 17)); \ + p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(t) | hppa_op_imm17(((int)(((target) - 8)>>2))) | ((n) << 1)); \ +} while (0) + +#define hppa_bl(p, target, t) hppa_bl_full(p, 0, target, t) +#define hppa_bl_n(p, target, t) hppa_bl_full(p, 1, target, t) + +#define hppa_bv(p, x, b) \ + p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(b) | hppa_op_r2(x) | (6 << 13)) + +#define hppa_blr(p, x, t) \ + p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(t) | hppa_op_r2(x) | (2 << 13)) + +/* hardcoded sr = sr4 */ +#define hppa_ble_full(p, n, d, b) \ + p = hppa_emit (p, hppa_opcode(0x39) | hppa_op_r1(b) | hppa_op_imm17(((int)(d)) >> 2) | (1 << 13) | ((n) << 1)) + +#define hppa_ble(p, d, b) hppa_ble_full(p, 0, d, b) +#define hppa_ble_n(p, d, b) hppa_ble_full(p, 1, d, b) + +#define hppa_be_full(p, n, d, b) \ + p = hppa_emit (p, hppa_opcode(0x38) | hppa_op_r1(b) | hppa_op_imm17(((int)(d)) >> 2) | (1 << 13) | ((n) << 1)) + +#define hppa_be(p, d, b) hppa_be_full(p, 0, d, b) 
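/* A hedged sketch of the branch macros above: call a nearby routine and
 * return. Assumes the PA-RISC return pointer rp is r2 and that a nop fills
 * each branch delay slot; the 64-byte displacement is illustrative only. */
    hppa_bl (p, 64, hppa_r2);      /* call: pc-relative branch, link in rp */
    hppa_nop (p);                  /* delay slot */
    hppa_bv (p, hppa_r0, hppa_r2); /* return: bv %r0(%rp) */
    hppa_nop (p);                  /* delay slot */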
+#define hppa_be_n(p, d, b) hppa_be_full(p, 1, d, b) + +#define hppa_bb_full(p, cond, n, r, b, t) \ + p = hppa_emit (p, hppa_opcode(0x31) | hppa_op_r1(b) | hppa_op_r2(r) | ((cond) << 13) | ((n) << 1) | hppa_op_imm12((int)(t))) + +#define hppa_bb(p, cond, r, b, t) hppa_bb_full(p, cond, 0, r, b, t) +#define hppa_bb_n(p, cond, r, b, t) hppa_bb_full(p, cond, 1, r, b, t) + + +#define hppa_movb(p, r1, r2, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x32) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +#define hppa_movib(p, i, r, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x33) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +#define hppa_combt(p, r1, r2, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x20) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +#define hppa_combf(p, r1, r2, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x22) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +#define hppa_combit(p, i, r, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x21) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +#define hppa_combif(p, i, r, cond, target) do { \ + g_assert (hppa_check_bits (target, 12)); \ + p = hppa_emit (p, hppa_opcode(0x23) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ +} while (0) + +/* TODO: addbt, addbf, addbit, addbif */ + +/* Load/store insns */ +#define hppa_ld_disp(p, op, d, b, t) do { \ + g_assert (hppa_check_bits (d, 14)); \ + p = hppa_emit (p, hppa_opcode(op) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14(((int)(d)))); \ +} while (0) + +#define hppa_ldb(p, d, b, t) hppa_ld_disp(p, 0x10, d, b, t) +#define hppa_ldh(p, d, b, t) hppa_ld_disp(p, 0x11, d, b, t) +#define hppa_ldw(p, d, b, t) hppa_ld_disp(p, 0x12, d, b, t) + +#define hppa_ldwm(p, d, b, t) \ + p = hppa_emit (p, hppa_opcode(0x13) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14(d)); \ + +#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) + +#define hppa_st_disp(p, op, r, d, b) do { \ + g_assert (hppa_check_bits (d, 14)); \ + p = hppa_emit (p, hppa_opcode(op) | hppa_op_r1(b) | hppa_op_r2(r) | hppa_op_imm14(((int)(d)))); \ +} while (0) + +#define hppa_stb(p, r, d, b) hppa_st_disp(p, 0x18, r, d, b) +#define hppa_sth(p, r, d, b) hppa_st_disp(p, 0x19, r, d, b) +#define hppa_stw(p, r, d, b) hppa_st_disp(p, 0x1A, r, d, b) + +#define hppa_stwm(p, r, d, b) \ + p = hppa_emit (p, hppa_opcode(0x1B) | hppa_op_r1(b) | hppa_op_r2(r) | hppa_op_imm14(d)) + +#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) + +/* s = 0, u = 0, cc = 0, m = 0 */ +#define hppa_ld_indexed(p, op, x, b, t) \ + p = hppa_emit (p, hppa_opcode(0x03) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t) | (op << 6)) + +#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) +#define hppa_ldhx(p, x, b, t) hppa_ld_indexed(p, 1, x, b, t) +#define hppa_ldwx(p, x, b, t) hppa_ld_indexed(p, 2, x, b, t) + +#define hppa_ldil(p, i, t) \ + p = hppa_emit (p, hppa_opcode(0x08) | hppa_op_r1(t) | hppa_op_imm21(((int)(i)))) + 
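/* Sketch: spill and reload a register with the load/store macros above.
 * Assumes the usual PA-RISC conventions (sp is r30, stack grows upward);
 * the -20 slot offset is purely illustrative. */
    hppa_stw (p, hppa_r4, -20, hppa_r30); /* *(sp - 20) = r4 */
    hppa_ldw (p, -20, hppa_r30, hppa_r4); /* r4 = *(sp - 20) */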
+#define hppa_ldo(p, d, b, t) \ + p = hppa_emit (p, hppa_opcode(0x0D) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14((int)(d))) + +#define hppa_set(p, imm, t) do { \ + if (hppa_check_bits ((int)(imm), 14)) \ + hppa_ldo (p, (int)(imm), hppa_r0, t); \ + else { \ + hppa_ldil (p, hppa_lsel (imm), t); \ + hppa_ldo (p, hppa_rsel (imm), t, t); \ + } \ +} while (0) + +/* addil's destination is always r1 */ +#define hppa_addil(p, i, r) \ + p = hppa_emit (p, hppa_opcode(0x0A) | hppa_op_r1(r) | hppa_op_imm21(i)) + +#define hppa_alu_op(p, op, cond, r1, r2, t) \ + p = hppa_emit (p, hppa_opcode_alu(0x02, op) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t) | ((cond) << 12)) + +#define hppa_add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x18, cond, r1, r2, t) +#define hppa_add(p, r1, r2, t) hppa_add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x28, cond, r1, r2, t) +#define hppa_addl(p, r1, r2, t) hppa_addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x38, cond, r1, r2, t) +#define hppa_addo(p, r1, r2, t) hppa_addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_addc_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1C, cond, r1, r2, t) +#define hppa_addc(p, r1, r2, t) hppa_addc_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_addco_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3C, cond, r1, r2, t) +#define hppa_addco(p, r1, r2, t) hppa_addco_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh1add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x19, cond, r1, r2, t) +#define hppa_sh1add(p, r1, r2, t) hppa_sh1add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh1addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x29, cond, r1, r2, t) +#define hppa_sh1addl(p, r1, r2, t) hppa_sh1addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh1addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x39, cond, r1, r2, t) +#define hppa_sh1addo(p, r1, r2, t) hppa_sh1addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh2add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1A, cond, r1, r2, t) +#define hppa_sh2add(p, r1, r2, t) hppa_sh2add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh2addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2A, cond, r1, r2, t) +#define hppa_sh2addl(p, r1, r2, t) hppa_sh2addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh2addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3A, cond, r1, r2, t) +#define hppa_sh2addo(p, r1, r2, t) hppa_sh2addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh3add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1B, cond, r1, r2, t) +#define hppa_sh3add(p, r1, r2, t) hppa_sh3add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh3addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2B, cond, r1, r2, t) +#define hppa_sh3addl(p, r1, r2, t) hppa_sh3addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) +#define hppa_sh3addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3B, cond, r1, r2, t) +#define hppa_sh3addo(p, r1, r2, t) hppa_sh3addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) + +#define hppa_sub_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x10, cond, r1, r2, t) +#define hppa_sub(p, r1, r2, t) hppa_sub_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_subo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x30, cond, r1, r2, t) +#define hppa_subo(p, r1, r2, t) hppa_subo_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_subb_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x14, cond, r1, r2, t) +#define hppa_subb(p, r1, r2, t) hppa_subb_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define
hppa_subbo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x34, cond, r1, r2, t) +#define hppa_subbo(p, r1, r2, t) hppa_subbo_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_subt_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x13, cond, r1, r2, t) +#define hppa_subt(p, r1, r2, t) hppa_subt_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_subto_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x33, cond, r1, r2, t) +#define hppa_subto(p, r1, r2, t) hppa_subto_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_ds_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x11, cond, r1, r2, t) +#define hppa_ds(p, r1, r2, t) hppa_ds_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) +#define hppa_comclr_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x22, cond, r1, r2, t) +#define hppa_comclr(p, r1, r2, t) hppa_comclr_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) + +#define hppa_or_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x09, cond, r1, r2, t) +#define hppa_or(p, r1, r2, t) hppa_or_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) +#define hppa_copy(p, r1, r2) hppa_or(p, r1, hppa_r0, r2) +#define hppa_nop(p) hppa_or(p, hppa_r0, hppa_r0, hppa_r0) +#define hppa_xor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x0A, cond, r1, r2, t) +#define hppa_xor(p, r1, r2, t) hppa_xor_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) +#define hppa_and_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x08, cond, r1, r2, t) +#define hppa_and(p, r1, r2, t) hppa_and_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) +#define hppa_andcm_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x00, cond, r1, r2, t) +#define hppa_andcm(p, r1, r2, t) hppa_andcm_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) + +#define hppa_uxor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x0E, cond, r1, r2, t) +#define hppa_uxor(p, r1, r2, t) hppa_uxor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) +#define hppa_uaddcm_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x26, cond, r1, r2, t) +#define hppa_uaddcm(p, r1, r2, t) hppa_uaddcm_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) +#define hppa_uaddcmt_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x27, cond, r1, r2, t) +#define hppa_uaddcmt(p, r1, r2, t) hppa_uaddcmt_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) +#define hppa_dcor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2E, cond, r1, r2, t) +#define hppa_dcor(p, r1, r2, t) hppa_dcor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) +#define hppa_idcor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2F, cond, r1, r2, t) +#define hppa_idcor(p, r1, r2, t) hppa_idcor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) + +#define hppa_addi(p, i, r, t) \ + p = hppa_emit (p, hppa_opcode(0x2D) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) + +#define hppa_subi(p, i, r, t) \ + p = hppa_emit (p, hppa_opcode(0x25) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) + +#define hppa_not(p, r, t) hppa_subi(p, -1, r, t) + +#define hppa_comiclr(p, i, r, t) \ + p = hppa_emit (p, hppa_opcode(0x24) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) + +#define hppa_vshd(p, r1, r2, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t)) + +/* shift is a register */ +#define hppa_lshr(p, r, shift, t) \ + do { \ + hppa_mtsar(p, shift); \ + hppa_vshd(p, hppa_r0, r, t); \ } while (0) -#define hppa_stw(p, src, disp, base) \ - do { \ - int neg = (disp) < 0; \ - g_assert(((disp) & 3) == 0); \ - *(p) = (0x68000000 | (((disp) & 0x1fff) << 1) | ((base) << 21) | ((src) << 16) | neg); \ - p++; \ +/* shift is a constant */ +#define hppa_shd(p, r1, r2, shift, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t) | 
(2 << 10) | ((31 - (shift)) << 5)) + +#define hppa_vextru(p, r, len, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (4 << 10) | (32 - (len))) + +#define hppa_vextrs(p, r, len, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (5 << 10) | (32 - (len))) + +/* shift is a register */ +#define hppa_shr(p, r, shift, t) \ + do { \ + hppa_subi(p, 31, shift, t); \ + hppa_mtsar(p, t); \ + hppa_vextrs(p, r, 32, t); \ } while (0) -#define hppa_copy(p, src, dest) \ - do { \ - *(p) = (0x34000000 | ((src) << 21) | ((dest) << 16)); \ - p++; \ +/* shift is a constant */ +#define hppa_extru(p, r, shift, len, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (6 << 10) | ((shift) << 5) | (32 - (len))) + +#define hppa_extrs(p, r, shift, len, t) \ + p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (7 << 10) | ((shift) << 5) | (32 - (len))) + +#define hppa_vdep(p, r, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(r) | hppa_op_r2(t) | (1 << 10) | (32 - (len))) + +#define hppa_dep(p, r, pos, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (3 << 10) | ((31 - (pos)) << 5) | (32 - (len))) + +#define hppa_vdepi(p, i, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (5 << 10) | (32 - (len))) + +#define hppa_depi(p, i, pos, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (7 << 10) | ((31 - (pos)) << 5) | (32 - (len))) + +#define hppa_zvdep(p, r, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (0 << 10) | (32 - (len))) + +/* shift is a register */ +#define hppa_shl(p, r, shift, t) \ + do { \ + hppa_subi(p, 31, shift, t); \ + hppa_mtsar(p, t); \ + hppa_zvdep(p, r, 32, t); \ } while (0) -#define hppa_ldd_with_flags(p, disp, base, dest, m, a) \ - do { \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - g_assert(((disp) & 7) == 0); \ - *(p) = (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 
0x4 : 0)); \ - p++; \ +#define hppa_zdep(p, r, pos, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (2 << 10) | ((31 - (pos)) << 5) | (32 - (len))) + +#define hppa_zvdepi(p, i, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (4 << 10) | (32 - (len))) + +#define hppa_zdepi(p, i, pos, len, t) \ + p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (6 << 10) | ((31 - (pos)) << 5) | (32 - (len))) + +/* FPU insns */ +/* These are valid for op == 0x0C only, for op == 0x0E there is an extra bit for + * r and t */ +#define hppa_fpu_class0(p, r, sub, fmt, t) \ + p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r) | hppa_op_r3(t) | ((sub) << 13) | ((fmt) << 11)) + +#define hppa_fpu_class1(p, r, sub, df, sf, t) \ + p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r) | hppa_op_r3(t) | ((sub) << 15) | ((df) << 13) | ((sf) << 11) | (1 << 9)) + +#define hppa_fpu_class2(p, r1, r2, sub, fmt, n, cond) \ + p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(cond) | ((sub) << 13) | ((fmt) << 11) | (2 << 9) | ((n) << 5)) + +#define hppa_fpu_class3(p, r1, r2, sub, fmt, t) \ + p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(t) | ((sub) << 13) | ((fmt) << 11) | (3 << 9)) + +#define HPPA_FP_FMT_SGL 0 +#define HPPA_FP_FMT_DBL 1 +#define HPPA_FP_FMT_QUAD 3 + +#define hppa_fcpy(p, fmt, r, t) hppa_fpu_class0(p, r, 2, fmt, t) +#define hppa_fabs(p, fmt, r, t) hppa_fpu_class0(p, r, 3, fmt, t) +#define hppa_fsqrt(p, fmt, r, t) hppa_fpu_class0(p, r, 4, fmt, t) +#define hppa_frnd(p, fmt, r, t) hppa_fpu_class0(p, r, 5, fmt, t) + +#define hppa_fcnvff(p, sf, df, r, t) hppa_fpu_class1(p, r, 0, df, sf, t) +#define hppa_fcnvxf(p, sf, df, r, t) hppa_fpu_class1(p, r, 1, df, sf, t) +#define hppa_fcnvfx(p, sf, df, r, t) hppa_fpu_class1(p, r, 2, df, sf, t) +#define hppa_fcnvfxt(p, sf, df, r, t) hppa_fpu_class1(p, r, 3, df, sf, t) + +#define hppa_fcmp(p, fmt, cond, r1, r2) hppa_fpu_class2(p, r1, r2, 0, fmt, 0, cond) +#define hppa_ftest(p, cond) hppa_fpu_class2(p, 0, 0, 1, 0, 1, cond) + +#define hppa_fadd(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 0, fmt, t) +#define hppa_fsub(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 1, fmt, t) +#define hppa_fmul(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 2, fmt, t) +#define hppa_fdiv(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 3, fmt, t) + +/* Note: fmpyadd and fmpysub have different fmt encodings as the other + * FP ops + */ +#define hppa_fmpyadd(p, fmt, rm1, rm2, tm, ra, ta) \ + p = hppa_emit (p, hppa_opcode(0x06) | hppa_op_r1(rm1) | hppa_op_r2(rm2) | hppa_op_r3(tm) | ((ta) << 11) | ((ra) << 6) | ((fmt) << 5)) + +#define hppa_fmpyadd_sgl(p, rm1, rm2, tm, ra, ta) \ + hppa_fmpyadd(p, 1, rm1, rm2, tm, ra, ta) + +#define hppa_fmpyadd_dbl(p, rm1, rm2, tm, ra, ta) \ + hppa_fmpyadd(p, 0, rm1, rm2, tm, ra, ta) + +#define hppa_fmpysub(p, fmt, rm1, rm2, tm, ra, ta) \ + p = hppa_emit (p, hppa_opcode(0x06) | hppa_op_r1(rm1) | hppa_op_r2(rm2) | hppa_op_r3(tm) | ((ta) << 11) | ((ra) << 6) | ((fmt) << 5)) + +#define hppa_fmpysub_sgl(p, rm1, rm2, tm, ra, ta) \ + hppa_fmpysub(p, 1, rm1, rm2, tm, ra, ta) + +#define hppa_fmpysub_dbl(p, rm1, rm2, tm, ra, ta) \ + hppa_fmpysub(p, 0, rm1, rm2, tm, ra, ta) + +#define hppa_xmpyu(p, r1, r2, t) \ + p = hppa_emit (p, hppa_opcode(0x0E) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(t) | (2 << 13) | (3 << 9) | (1 << 8)) + +#define hppa_fldwx(p, x, b, t, half) \ + p = hppa_emit (p, 
hppa_opcode(0x09) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t) | ((half) << 6)) + +#define hppa_flddx(p, x, b, t) \ + p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t)) + +#define hppa_fstwx(p, r, half, x, b) \ + p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(r) | ((half) << 6) | (1 << 9)) + +#define hppa_fstdx(p, r, x, b) \ + p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(r) | (1 << 9)) + +#define hppa_fldws(p, d, b, t, half) \ + p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(t) | ((half) << 6) | (1 << 12)) + +#define hppa_fldds(p, d, b, t) \ + p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(t) | (1 << 12)) + +#define hppa_fstws(p, r, half, d, b) \ + p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(r) | ((half) << 6) | (1 << 12) | (1 << 9)) + +#define hppa_fstds(p, r, d, b) \ + p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(r) | (1 << 12) | (1 << 9)) + + +/* Not yet converted old macros - used by interpreter */ +#define hppa_ldd_with_flags(p, disp, base, dest, m, a) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + g_assert(((disp) & 7) == 0); \ + *c++ = (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_ldd(p, disp, base, dest) \ @@ -109,13 +639,14 @@ typedef enum { #define hppa_ldd_mb(p, disp, base, dest) \ hppa_ldd_with_flags(p, disp, base, dest, 1, 1) -#define hppa_std_with_flags(p, src, disp, base, m, a); \ - do { \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - g_assert(((disp) & 7) == 0); \ - *(p) = (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ - p++; \ +#define hppa_std_with_flags(p, src, disp, base, m, a) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + g_assert(((disp) & 7) == 0); \ + *c++ = (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_std(p, disp, base, dest) \ @@ -125,55 +656,60 @@ typedef enum { hppa_std_with_flags(p, disp, base, dest, 1, 0) #define hppa_fldd_with_flags(p, disp, base, dest, m, a) \ - do { \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - *(p) = (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ - p++; \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + *c++ = (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_fldd(p, disp, base, dest) \ hppa_fldd_with_flags(p, disp, base, dest, 0, 0) -#define hppa_fstd_with_flags(p, src, disp, base, m, a) \ - do { \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - *(p) = (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 
0x4 : 0)); \ - p++; \ +#define hppa_fstd_with_flags(p, src, disp, base, m, a) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im10a = (disp) >> 3; \ + *c++ = (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_fstd(p, disp, base, dest) \ hppa_fstd_with_flags(p, disp, base, dest, 0, 0) -#define hppa_fldw_with_flags(p, im11a, base, dest, r) \ - do { \ - int neg = (disp) < 0; \ - int im11a = (disp) >> 2; \ - *(p) = (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)); \ - p++; \ +#define hppa_fldw_with_flags(p, disp, base, dest, r) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im11a = (disp) >> 2; \ + *c++ = (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_fldw(p, disp, base, dest) \ hppa_fldw_with_flags(p, disp, base, dest, 1) -#define hppa_fstw_with_flags(p, src, disp, base, r) \ - do { \ - int neg = (disp) < 0; \ - int im11a = (disp) >> 2; \ - *(p) = (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)); \ - p++; \ +#define hppa_fstw_with_flags(p, src, disp, base, r) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + int neg = (disp) < 0; \ + int im11a = (disp) >> 2; \ + *c++ = (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)); \ + p = (void *)c; \ } while (0) #define hppa_fstw(p, src, disp, base) \ hppa_fstw_with_flags(p, src, disp, base, 1) /* only works on right half SP registers */ -#define hppa_fcnv(p, src, ssng, dest, dsng) \ - do { \ - *(p) = (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)); \ - p++; \ +#define hppa_fcnv(p, src, ssng, dest, dsng) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + *c++ = (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)); \ + p = (void *)c; \ } while (0) #define hppa_fcnv_sng_dbl(p, src, dest) \ @@ -182,25 +718,11 @@ typedef enum { #define hppa_fcnv_dbl_sng(p, src, dest) \ hppa_fcnv(p, src, 0, dest, 1) -#define hppa_ldil(p, val, dest) \ - do { \ - unsigned int t = (val >> 11) & 0x1fffff; \ - unsigned int im21 = ((t & 0x7c) << 14) | ((t & 0x180) << 7) | ((t & 0x3) << 12) | ((t & 0xffe00) >> 8) | ((t & 0x100000) >> 20); \ - *(p) = (0x20000000 | im21 | ((dest) << 21)); \ - p++; \ - } while (0) - -#define hppa_ldo(p, off, base, dest) \ - do { \ - int neg = (off) < 0; \ - *(p) = (0x34000000 | (((off) & 0x1fff)) << 1 | ((base) << 21) | ((dest) << 16) | neg); \ - p++; \ - } while (0) - -#define hppa_extrdu(p, src, pos, len, dest) \ - do { \ - *(p) = (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ? 0x1000 : 0) | (32 - (len & 31))); \ - p++; \ +#define hppa_extrdu(p, src, pos, len, dest) \ + do { \ + unsigned int *c = (unsigned int *)(p); \ + *c++ = (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ?
0x1000 : 0) | (32 - (len & 31))); \ + p = (void *)c; \ } while (0) #define hppa_bve(p, reg, link) \ @@ -211,3 +733,5 @@ typedef enum { #define hppa_blve(p, reg) \ hppa_bve(p, reg, 1) + +#endif diff --git a/hppa/tramp.c b/hppa/tramp.c index 089abde..e012436 100644 --- a/hppa/tramp.c +++ b/hppa/tramp.c @@ -24,6 +24,8 @@ Trampoline generation for HPPA - currently (Oct 9th 2003) only supports 64 bits - and the HP compiler. */ +#ifndef __linux__ + #include "mono/interpreter/interp.h" #include "mono/metadata/appdomain.h" #include "mono/metadata/tabledefs.h" @@ -776,3 +778,4 @@ generate: return ji->code_start; } +#endif -- cgit v1.1 From 25f0e1d2bd61097c008fa88e4a114884bb6fe0c9 Mon Sep 17 00:00:00 2001 From: Paolo Molaro Date: Wed, 4 Jul 2007 13:17:45 +0000 Subject: Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro * x86/x86-codegen.h: added minimal sse instructions currently needed by the JIT. svn path=/trunk/mono/; revision=81331 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8cdb3dd..8fb7a95 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,9 @@ +Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro + + * x86/x86-codegen.h: added minimal sse instructions currently + needed by the JIT. + 2007-06-13 Randolph Chung * hppa/hppa-codegen.h: Update with more instructions. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 8cf3e80..cb07f65 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -29,6 +29,19 @@ typedef enum { X86_EDI = 7, X86_NREG } X86_Reg_No; + +typedef enum { + X86_XMM0, + X86_XMM1, + X86_XMM2, + X86_XMM3, + X86_XMM4, + X86_XMM5, + X86_XMM6, + X86_XMM7, + X86_XMM_NREG +} X86_XMM_Reg_No; + /* // opcodes for alu instructions */ @@ -1694,4 +1707,21 @@ typedef union { x86_ret ((inst)); \ } while (0) +/* minimal SSE* support */ +#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf2; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ + } while (0) + +#define x86_cvttsd2si(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf2; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x2c; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + #endif // X86_H -- cgit v1.1 From e43f3ebed2b5b54c47b5f8ce458788dce0ef97dc Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 14 Jul 2007 14:04:54 +0000 Subject: 2007-07-14 Zoltan Varga * amd64/amd64-codegen.h: Remove some unused rex prefixes. svn path=/trunk/mono/; revision=81979 --- ChangeLog | 3 +++ amd64/amd64-codegen.h | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 8fb7a95..3312495 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +2007-07-14 Zoltan Varga + + * amd64/amd64-codegen.h: Remove some unused rex prefixes. 
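The minimal x86 SSE helpers added above cover exactly the double-to-integer path the JIT needs. A usage sketch (operands illustrative):

    unsigned char buf [32];
    unsigned char *code = buf;
    x86_movsd_reg_membase (code, X86_XMM0, X86_EBP, 8); /* xmm0 = double at [ebp+8] */
    x86_cvttsd2si (code, X86_EAX, X86_XMM0);            /* eax = (int)xmm0, truncating */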
Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 5648a1c..f9f26f8 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -670,13 +670,13 @@ typedef union { #define amd64_loopne_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); } while (0) #define amd64_jump32_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); } while (0) #define amd64_jump8_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); } while (0) -#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) +#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { x86_jump_code((inst),(target)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); } while (0) -#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) -#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); } while (0) #define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0) #define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) -- cgit v1.1 From 118f4540a2da9cdb72debfb786a9930e93f2a10b Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 9 Oct 2007 00:12:58 +0000 Subject: 2007-10-09 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unnecessary rex prefix which trips up valgrind. svn path=/trunk/mono/; revision=87140 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 3312495..b59fedd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2007-10-09 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unnecessary + rex prefix which trips up valgrind. + 2007-07-14 Zoltan Varga * amd64/amd64-codegen.h: Remove some unused rex prefixes.
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index f9f26f8..644d77e 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -672,7 +672,7 @@ typedef union { #define amd64_jump8_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); } while (0) #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) -#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) +#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { x86_jump_code((inst),(target)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) -- cgit v1.1 From 8991f4a9503167171a0ad5e745d71ec4bd8b846c Mon Sep 17 00:00:00 2001 From: Jonathan Chambers Date: Fri, 26 Oct 2007 14:41:54 +0000 Subject: 2007-10-26 Jonathan Chambers * mini-amd64.c: Begin Win64 port. Use AMD64_ARG_REG# defines to access param registers. Replace long usage with gsize as sizeof(long) != sizeof(void*) on Win64. * mini-amd64.h: Add %rdi and %rsi to MonoLMF structure on Win64. Fix intrinsic, use _AddressOfReturnAddress instead of non-existent _GetAddressOfReturnAddress. * tramp-amd64.c: Use AMD64_ARG_REG# defines to access param registers. Save/restore %rdi and %rsi in MonoLMF. * exceptions-amd64.c: Use AMD64_ARG_REG# defines to access param registers. Modify (throw_exception) signature to take %rdi and %rsi on Win64. Code is contributed under MIT/X11 license. 2007-10-26 Jonathan Chambers * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG# defines to access param registers. Replace long usage with gsize as sizeof(long) != sizeof(void*) on Win64. Code is contributed under MIT/X11 license. svn path=/trunk/mono/; revision=88258 --- ChangeLog | 8 ++++++++ amd64/amd64-codegen.h | 36 +++++++++++++++++++++++++++++----- 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index b59fedd..97265a5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2007-10-26 Jonathan Chambers + + * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG# + defines to access param registers. Replace long usage with + gsize as sizeof(long) != sizeof(void*) on Win64. + + Code is contributed under MIT/X11 license.
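Both rex fixes above rest on the same rule: indirect jumps, like calls, are implicitly 64-bit in long mode, so only REX.B for an extended register is ever required. Illustrative encodings:

    amd64_jump_reg_size (code, AMD64_R10, 8);         /* 41 FF E2: jmp *%r10 */
    amd64_jump_membase_size (code, AMD64_RBP, -8, 8); /* FF 65 F8: jmp *-8(%rbp), no rex at all */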
+ 2007-10-09 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unnecessary diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 644d77e..e0127f1 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -16,6 +16,8 @@ #ifndef AMD64_H #define AMD64_H +#include <glib.h> + typedef enum { AMD64_RAX = 0, AMD64_RCX = 1, @@ -65,6 +67,28 @@ typedef enum AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ } AMD64_REX_Bits; +#ifdef PLATFORM_WIN32 +#define AMD64_ARG_REG1 AMD64_RCX +#define AMD64_ARG_REG2 AMD64_RDX +#define AMD64_ARG_REG3 AMD64_R8 +#define AMD64_ARG_REG4 AMD64_R9 +#else +#define AMD64_ARG_REG1 AMD64_RDI +#define AMD64_ARG_REG2 AMD64_RSI +#define AMD64_ARG_REG3 AMD64_RDX +#define AMD64_ARG_REG4 AMD64_RCX +#endif + +#ifdef PLATFORM_WIN32 +#define AMD64_CALLEE_REGS ((1< Date: Thu, 1 Nov 2007 19:03:16 +0000 Subject: 2007-11-01 Geoff Norton * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true Fixes the build on Leopard. svn path=/trunk/mono/; revision=88673 --- ChangeLog | 5 +++++ ppc/Makefile.am | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 97265a5..4ca6138 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2007-11-01 Geoff Norton + + * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true + Fixes the build on Leopard. + 2007-10-26 Jonathan Chambers * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG# defines to access param registers. diff --git a/ppc/Makefile.am b/ppc/Makefile.am index ddfb109..b013d21 100644 --- a/ppc/Makefile.am +++ b/ppc/Makefile.am @@ -1,3 +1,4 @@ +if INTERP_SUPPORTED INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) @@ -5,4 +6,6 @@ noinst_LTLIBRARIES = libmonoarch-ppc.la libmonoarch_ppc_la_SOURCES = tramp.c ppc-codegen.h -noinst_PROGRAMS = test \ No newline at end of file +noinst_PROGRAMS = test + +endif -- cgit v1.1 From e22c1134d1553f6da21c1ef50ab4afb009d7c215 Mon Sep 17 00:00:00 2001 From: Geoff Norton Date: Mon, 5 Nov 2007 22:28:08 +0000 Subject: 2007-11-01 Geoff Norton * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true Fixes the build on Leopard. svn path=/trunk/mono/; revision=88931 --- ChangeLog | 7 ++++++- x86/Makefile.am | 2 ++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 4ca6138..a066cc7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,11 @@ 2007-11-01 Geoff Norton - * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true + * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true + Fixes the build on Leopard. + +2007-11-01 Geoff Norton + + * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true Fixes the build on Leopard. 2007-10-26 Jonathan Chambers diff --git a/x86/Makefile.am b/x86/Makefile.am index 8d809b8..ab4c142 100644 --- a/x86/Makefile.am +++ b/x86/Makefile.am @@ -1,3 +1,4 @@ +if INTERP_SUPPORTED INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) @@ -5,3 +6,4 @@ noinst_LTLIBRARIES = libmonoarch-x86.la libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h +endif -- cgit v1.1 From b15fabef0c7798e4850432910d97e0249cd691fc Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 10 Nov 2007 15:22:00 +0000 Subject: 2007-11-03 David S. Miller * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi can be used if the constant value only has the top 22 bits set.
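The AMD64_ARG_REG# defines above let trampoline and JIT code name parameter registers portably: they resolve to rcx/rdx/r8/r9 under the Win64 convention and rdi/rsi/rdx/rcx under SysV. A hedged sketch (amd64_mov_reg_reg as defined elsewhere in the header; the register choices are illustrative):

    amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8); /* stash the first argument */
    amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG2, 8); /* second argument into rax */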
svn path=/trunk/mono/; revision=89409 --- ChangeLog | 5 +++++ sparc/sparc-codegen.h | 7 ++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index a066cc7..b4a3ffe 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2007-11-03 David S. Miller + + * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi + can be used if the constant value only has the top 22 bits set. + 2007-11-01 Geoff Norton * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true diff --git a/sparc/sparc-codegen.h b/sparc/sparc-codegen.h index c04f5ce..eb421bb 100644 --- a/sparc/sparc-codegen.h +++ b/sparc/sparc-codegen.h @@ -855,7 +855,7 @@ typedef struct { do { \ if ((val) == 0) \ sparc_clr_reg((ins),(reg)); \ - else if (((guint32)(val) & 0x1fff) == 0) \ + else if (((guint32)(val) & 0x3ff) == 0) \ sparc_sethi((ins),(guint32)(val),(reg)); \ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \ @@ -883,7 +883,8 @@ typedef struct { else if ((val >= -4096) && ((val) <= 4095)) \ sparc_or_imm((ins),FALSE,sparc_g0,bottom_word,(reg)); \ else if ((val >= 0) && (val <= 4294967295L)) { \ - sparc_sethi((ins),bottom_word,(reg)); \ + sparc_sethi((ins),bottom_word,(reg)); \ + if (bottom_word & 0x3ff) \ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \ } \ else if ((val >= 0) && (val <= (1L << 44) - 1)) { \ @@ -913,7 +914,7 @@ typedef struct { do { \ if ((val) == 0) \ sparc_clr_reg((ins),(reg)); \ - else if (((guint32)(val) & 0x1fff) == 0) \ + else if (((guint32)(val) & 0x3ff) == 0) \ sparc_sethi((ins),(guint32)(val),(reg)); \ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \ -- cgit v1.1 From 11c84542edf07ed41b831c12058f9a0bdd83df93 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 20 Nov 2007 17:45:36 +0000 Subject: 2007-11-20 Zoltan Varga * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller instruction encoding. svn path=/trunk/mono/; revision=90005 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 10 ++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index b4a3ffe..42bedf8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2007-11-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller + instruction encoding. + 2007-11-03 David S. 
Miller * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index e0127f1..c5cfda0 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -156,17 +156,15 @@ typedef union { #define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ do { \ - if ((reg) == X86_EAX) { \ - amd64_emit_rex(inst, size, 0, 0, 0); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ - x86_imm_emit32 ((inst), (imm)); \ - break; \ - } \ if (x86_is_imm8((imm))) { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ + } else if ((reg) == X86_EAX) { \ + amd64_emit_rex(inst, size, 0, 0, 0); \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ + x86_imm_emit32 ((inst), (imm)); \ } else { \ amd64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x81; \ -- cgit v1.1 From 95aa5dc93dbfbcf10125032ecde0e5eabc969a98 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 24 Jan 2008 20:10:14 +0000 Subject: 2008-01-24 Zoltan Varga * Makefile.am (SUBDIRS): Only set this on arm. svn path=/trunk/mono/; revision=93833 --- ChangeLog | 4 ++++ Makefile.am | 2 ++ 2 files changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 42bedf8..463b2c9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-01-24 Zoltan Varga + + * Makefile.am (SUBDIRS): Only set this on arm. + 2007-11-20 Zoltan Varga * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller diff --git a/Makefile.am b/Makefile.am index 31c55c2..65e1293 100644 --- a/Makefile.am +++ b/Makefile.am @@ -2,8 +2,10 @@ DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 mips INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +if ARM # arm needs to build some stuff even in JIT mode SUBDIRS = $(arch_target) +endif if INTERP_SUPPORTED -- cgit v1.1 From b951542a9ead8a408c6560a0ffad28a5ade9670d Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 24 Jan 2008 20:12:46 +0000 Subject: 2008-01-24 Zoltan Varga * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true. svn path=/trunk/mono/; revision=93834 --- ChangeLog | 2 ++ Makefile.am | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 463b2c9..d5437c6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,7 @@ 2008-01-24 Zoltan Varga + * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true. + * Makefile.am (SUBDIRS): Only set this on arm. 2007-11-20 Zoltan Varga diff --git a/Makefile.am b/Makefile.am index 65e1293..0960416 100644 --- a/Makefile.am +++ b/Makefile.am @@ -2,10 +2,14 @@ DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 mips INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +if INTERP_SUPPORTED +SUBDIRS = $(arch_target) +else if ARM # arm needs to build some stuff even in JIT mode SUBDIRS = $(arch_target) endif +endif if INTERP_SUPPORTED -- cgit v1.1 From 9cbc23b5ee9e4f2dca88f8418d11be97079c25a1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 8 Feb 2008 14:28:06 +0000 Subject: 2008-02-08 Zoltan Varga * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes so they are consistent. 
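The halfword and signed-byte transfer macros used to take (p, rm, rd, rn) while everything else in the header puts the destination first; after this change they all read (p, rd, rm, rn), i.e. destination, then offset register, then base. A minimal usage sketch, assuming the ARMREG_* register names:

    ARM_LDRH_REG_REG (p, ARMREG_R0, ARMREG_R1, ARMREG_R2); /* ldrh r0, [r2, r1] */
    ARM_STRH_REG_REG (p, ARMREG_R0, ARMREG_R1, ARMREG_R2); /* strh r0, [r2, r1] */
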
svn path=/trunk/mono/; revision=95254 --- ChangeLog | 5 +++++ arm/arm-codegen.h | 22 +++++++++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/ChangeLog b/ChangeLog index d5437c6..6458f6d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-02-08 Zoltan Varga + + * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes + so they are consistent. + 2008-01-24 Zoltan Varga * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true. diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index d8c293a..5808890 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -790,22 +790,22 @@ typedef struct { #define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond) -#define ARM_LDRH_REG_REG_COND(p, rm, rd, rn, cond) \ +#define ARM_LDRH_REG_REG_COND(p, rd, rm, rn, cond) \ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond)) -#define ARM_LDRH_REG_REG(p, rm, rd, rn) \ - ARM_LDRH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) -#define ARM_LDRSH_REG_REG_COND(p, rm, rd, rn, cond) \ +#define ARM_LDRH_REG_REG(p, rd, rm, rn) \ + ARM_LDRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL) +#define ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, cond) \ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) -#define ARM_LDRSH_REG_REG(p, rm, rd, rn) \ - ARM_LDRSH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) -#define ARM_LDRSB_REG_REG_COND(p, rm, rd, rn, cond) \ +#define ARM_LDRSH_REG_REG(p, rd, rm, rn) \ + ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL) +#define ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, cond) \ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond)) -#define ARM_LDRSB_REG_REG(p, rm, rd, rn) ARM_LDRSB_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) +#define ARM_LDRSB_REG_REG(p, rd, rm, rn) ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL) -#define ARM_STRH_REG_REG_COND(p, rm, rd, rn, cond) \ +#define ARM_STRH_REG_REG_COND(p, rd, rm, rn, cond) \ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond)) -#define ARM_STRH_REG_REG(p, rm, rd, rn) \ - ARM_STRH_REG_REG_COND(p, rm, rd, rn, ARMCOND_AL) +#define ARM_STRH_REG_REG(p, rd, rm, rn) \ + ARM_STRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL) -- cgit v1.1 From 7a7cef000b9d59672b47c0fcdf75bd1fc00b8c78 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 14 Feb 2008 14:21:56 +0000 Subject: 2008-02-14 Zoltan Varga * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro. svn path=/trunk/mono/; revision=95633 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index 6458f6d..33fb06c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-02-14 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro. 
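The membase8 variant issues a byte-sized ALU operation on a base+displacement operand (the 8-bit-immediate opcode group), which the word/dword-oriented amd64_alu_membase_imm_size cannot express. A hedged usage sketch (the trailing size argument only feeds the REX prefix, as in the macro body below):

    amd64_alu_membase8_imm_size (code, X86_CMP, AMD64_RDI, 0x10, 1, 4); /* cmpb $1, 0x10(%rdi) */
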
+ 2008-02-08 Zoltan Varga * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index c5cfda0..4e1e608 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -582,6 +582,7 @@ typedef union { //#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0) #define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); } while (0) #define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) +#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) #define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); } while (0) #define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) //#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); } while (0) -- cgit v1.1 From 8c6ca9f3fda169feccab289ecd181e06bcc8e133 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 18 Feb 2008 18:25:24 +0000 Subject: 2008-02-18 Zoltan Varga * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro. svn path=/trunk/mono/; revision=96092 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index 33fb06c..d1b75d2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-02-18 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro. + 2008-02-14 Zoltan Varga * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 4e1e608..fcfea74 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -278,6 +278,13 @@ typedef union { x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ } while (0) +#define amd64_movsxd_reg_mem(inst,reg,mem) \ + do { \ + amd64_emit_rex(inst,8,(reg),0,0); \ + *(inst)++ = (unsigned char)0x63; \ + x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ + } while (0) + #define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ amd64_emit_rex(inst,8,(reg),0,(basereg)); \ -- cgit v1.1 From a977d5e7585e338491944fc87b5e018891eedd93 Mon Sep 17 00:00:00 2001 From: Geoff Norton Date: Wed, 12 Mar 2008 17:08:32 +0000 Subject: In .: 2008-03-13 Geoff Norton * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX svn path=/trunk/mono/; revision=98063 --- ChangeLog | 4 ++++ arm/tramp.c | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index d1b75d2..fb08f51 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-03-13 Geoff Norton + + * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX + 2008-02-18 Zoltan Varga * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro. 
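movsxd (REX.W + 0x63 /r) sign-extends a 32-bit source into a 64-bit register; the new _reg_mem form takes a bare disp32 operand where the existing _reg_membase form takes base+displacement. A hedged sketch, with slot standing for a hypothetical 32-bit displacement:

    amd64_movsxd_reg_mem (code, AMD64_RAX, slot); /* widen the 32-bit value at slot into rax */
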
diff --git a/arm/tramp.c b/arm/tramp.c index b9e04b5..c2a1c20 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -12,6 +12,7 @@ # include #endif +#if !defined(PLATFORM_MACOSX) #include #include "mono/metadata/class.h" @@ -705,4 +706,4 @@ MonoMethod* mono_method_pointer_get (void* code) c[7] == 'o') return ((MonoMethod**)code)[2]; return NULL; } - +#endif -- cgit v1.1 From cb1954322f73b8d1b0a6836c5242b05538ed72dd Mon Sep 17 00:00:00 2001 From: Jb Evain Date: Sun, 13 Apr 2008 11:44:22 +0000 Subject: last merge 100420:100549 svn path=/branches/jb/ml2/mono/; revision=100550 --- amd64/amd64-codegen.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index fcfea74..2bc268e 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -517,6 +517,8 @@ typedef union { #define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) +#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) + #define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) #define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) -- cgit v1.1 From 0ded1416da01e39a6c4a33fc9798123d4021fe4d Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 19 Apr 2008 14:18:56 +0000 Subject: 2008-04-19 Zoltan Varga * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of win64. svn path=/trunk/mono/; revision=101210 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index fb08f51..864dfcf 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-04-19 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of + win64. + 2008-03-13 Geoff Norton * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 2bc268e..836a1bd 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -129,7 +129,7 @@ typedef union { #define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) #define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) -#define amd64_is_imm32(val) ((glong)val >= -((glong)1<<31) && (glong)val <= (((glong)1<<31)-1)) +#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ do { \ -- cgit v1.1 From ecbcbb317678440e62a13e16820f95f6ea2dff3d Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 6 Jun 2008 02:08:56 +0000 Subject: 2008-06-06 Zoltan Varga * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the instructions recommended by the amd64 manual. svn path=/trunk/mono/; revision=105134 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 864dfcf..b663c06 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-06-06 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the + instructions recommended by the amd64 manual. 
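The recommendation is to cover the pad with as few instructions as possible: a 0x66 operand-size prefix on nop (0x90) still decodes as a single instruction, so the two- and three-byte fillers become 66 90 and 66 66 90 rather than runs of one-byte nops. A minimal sketch:

    amd64_padding_size (code, 3); /* emits 66 66 90 -- one instruction, not three */
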
+ 2008-04-19 Zoltan Varga * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 836a1bd..f0dd8ad 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -466,8 +466,16 @@ typedef union { x86_reg_emit ((inst),1,(reg) & 0x7); \ } while (0) +/* From the AMD64 Software Optimization Manual */ #define amd64_padding_size(inst,size) \ - do { if (size == 1) x86_padding ((inst),(size)); else { amd64_emit_rex ((inst),8,0,0,0); x86_padding((inst),(size) - 1); } } while (0) + do { \ + switch ((size)) { \ + case 1: *(inst)++ = 0x90; break; \ + case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ + }; \ + } while (0) #define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ amd64_emit_rex ((inst),0,0,0,(basereg)); \ -- cgit v1.1 From 5c8178c1e6cf4d2370c865c6bc66995ca1174eb9 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Mon, 16 Jun 2008 09:37:01 +0000 Subject: 2008-06-16 Mark Probst * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro nobody uses. svn path=/trunk/mono/; revision=105886 --- ChangeLog | 5 +++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index b663c06..b286a8f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-06-16 Mark Probst + + * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro + nobody uses. + 2008-06-06 Zoltan Varga * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index f0dd8ad..3bf67c7 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -582,7 +582,7 @@ typedef union { #define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) #define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) #define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg))); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) +#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 
4 : (size)); } while (0) #define amd64_inc_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); } while (0) #define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); } while (0) //#define amd64_inc_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); } while (0) -- cgit v1.1 From 386d8b482a7e399e4e8d130dd0d2d2ab405068ae Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Sun, 7 Sep 2008 10:25:11 +0000 Subject: 2008-09-07 Mark Probst * marshal.c (mono_type_native_stack_size): Treat MONO_TYPE_TYPEDBYREF like MONO_TYPE_VALUETYPE. 2008-09-07 Mark Probst * method-to-ir.c (mono_method_to_ir2): Disable tail calls for PPC until they're implemented properly. * exceptions-ppc.c: Use arch-independent exception-handling code instead of custom one. * exceptions-ppc.c, mini-ppc.c, mini-ppc.h: Bug fixes and changes for Linear IR. * tramp-ppc.c, mini-ppc.c: Fixed warnings. * decompose.c, aot-runtime.c, aot-compiler.c: PPC code also applies when __powerpc__ is defined. 2008-09-07 Mark Probst * libtest.c: Darwin structure alignment also applies to PPC. 2008-09-07 Mark Probst * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some warnings. svn path=/trunk/mono/; revision=112455 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index b286a8f..e4956c3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-09-07 Mark Probst + + * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some + warnings. + 2008-06-16 Mark Probst * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index c3181e7..ebcc21e 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -123,7 +123,7 @@ enum { #define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) #define ppc_load(c,D,v) do { \ - if (ppc_is_imm16 ((v))) { \ + if (ppc_is_imm16 ((guint32)(v))) { \ ppc_li ((c), (D), (guint16)(v)); \ } else { \ ppc_lis ((c), (D), (guint32)(v) >> 16); \ -- cgit v1.1 From f2d756dab8d08c009df41d94eb21fdf427a8e01a Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Sat, 27 Sep 2008 13:02:48 +0000 Subject: 2008-09-27 Mark Probst * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings. 2008-09-27 Mark Probst * mini-ppc.c: Compiler warning fixes. svn path=/trunk/mono/; revision=114279 --- ChangeLog | 4 ++++ ppc/ppc-codegen.h | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index e4956c3..bc049b7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-09-27 Mark Probst + + * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings. 
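ppc_load's shape is unchanged: a constant that fits a signed 16-bit immediate becomes a single li, anything else a lis/ori pair; the casts only silence truncation warnings when the value is narrowed to guint16. A minimal sketch, assuming the ppc_r* names from the header:

    ppc_load (code, ppc_r3, 42);         /* li r3, 42 */
    ppc_load (code, ppc_r4, 0x12345678); /* lis r4, 0x1234; ori r4, r4, 0x5678 */
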
+ 2008-09-07 Mark Probst * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index ebcc21e..9382fd9 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -117,14 +117,14 @@ enum { PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ }; -#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (guint8 *)(c) + sizeof (guint32);} while (0) +#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0) #define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) #define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) #define ppc_load(c,D,v) do { \ if (ppc_is_imm16 ((guint32)(v))) { \ - ppc_li ((c), (D), (guint16)(v)); \ + ppc_li ((c), (D), (guint16)(guint32)(v)); \ } else { \ ppc_lis ((c), (D), (guint32)(v) >> 16); \ ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ -- cgit v1.1 From 922c5a03dc6cd66147b1c6bfeb8c1045176618da Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 3 Oct 2008 14:28:09 +0000 Subject: 2008-10-03 Rodrigo Kumpera * x86/x86-codegen.h: Add macros and enum for SSE instructions. svn path=/trunk/mono/; revision=114751 --- ChangeLog | 4 +++ x86/x86-codegen.h | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/ChangeLog b/ChangeLog index bc049b7..33e080c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-03 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macros for SSE instructions. + 2008-09-27 Mark Probst * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index cb07f65..59a230f 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1707,6 +1707,28 @@ typedef union { x86_ret ((inst)); \ } while (0) + +typedef enum { + X86_SSE_SQRT = 0x51, + X86_SSE_RSQRT = 0x52, + X86_SSE_ADD = 0x58, + X86_SSE_DIV = 0x5E, + X86_SSE_MUL = 0x59, + X86_SSE_SUB = 0x5C, + X86_SSE_MIN = 0x5D, + X86_SSE_MAX = 0x5F, + + X86_SSE_ADDSUB = 0xD0, + X86_SSE_HADD = 0x7C, + X86_SSE_HSUB = 0x7D, + + X86_SSE_PAND = 0XDB, + X86_SSE_POR = 0XEB, + X86_SSE_PXOR = 0XEF, + +} X86_SSE_Opcode; + + /* minimal SSE* support */ #define x86_movsd_reg_membase(inst,dreg,basereg,disp) \ do { \ @@ -1724,4 +1746,88 @@ typedef union { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)(opc); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ + do { \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF2; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF3; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_movups_reg_membase(inst,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + +#define x86_movups_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned 
char)0x11; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x28; \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x29; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_reg_reg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x28; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + } while (0) + + +#define x86_movd_reg_xreg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x7e; \ + x86_reg_emit ((inst), (sreg), (dreg)); \ + } while (0) + +#define x86_pshufd_reg_reg(inst,dreg,sreg,mask) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x70; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + *(inst)++ = (unsigned char)mask; \ + } while (0) + #endif // X86_H + -- cgit v1.1 From 5de452f7ff84e26bd22b86205a1cdb9fc207fe75 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 9 Oct 2008 18:28:16 +0000 Subject: 2008-10-09 Rodrigo Kumpera * x86/x86-codegen.h: Add macros for sse shift, pack, unpack, saturated math and packed byte/word/dword math. svn path=/trunk/mono/; revision=115367 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 33e080c..d90994d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-10-09 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macros for sse shift, pack, unpack, + saturated math and packed byte/word/dword math. + 2008-10-03 Rodrigo Kumpera * x86/x86-codegen.h: Add macros for SSE instructions. 
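The macros below build on the pattern set by the 2008-10-03 batch: most SSE arithmetic shares one 0F opcode map and is distinguished only by a mandatory prefix (none = ps, 66 = pd, F3 = ss, F2 = sd), so the x86_sse_alu_{ps,pd,ss,sd}_reg_reg emitters differ only in the prefix byte. Shifts by immediate add one twist: the modrm reg field carries the operation (X86_SSE_SHL = 6 is the /6 of the opcode) rather than a register. A minimal sketch, with xmm registers as plain 0-7 indices:

    x86_sse_alu_sd_reg_reg (code, X86_SSE_ADD, 0, 1); /* addsd %xmm1, %xmm0: F2 0F 58 C1 */
    x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, 2, 3); /* mulps %xmm3, %xmm2: 0F 59 D3 */
    x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, 0, 4); /* psllw $4, %xmm0: 66 0F 71 F0 04 */
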
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 59a230f..ed14f58 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1722,9 +1722,43 @@ typedef enum { X86_SSE_HADD = 0x7C, X86_SSE_HSUB = 0x7D, - X86_SSE_PAND = 0XDB, - X86_SSE_POR = 0XEB, - X86_SSE_PXOR = 0XEF, + X86_SSE_PAND = 0xDB, + X86_SSE_POR = 0xEB, + X86_SSE_PXOR = 0xEF, + + X86_SSE_PADDB = 0xFC, + X86_SSE_PADDW = 0xFD, + X86_SSE_PADDD = 0xFE, + + X86_SSE_PSUBB = 0xF8, + X86_SSE_PSUBW = 0xF9, + X86_SSE_PSUBD = 0xFA, + + X86_SSE_PUNPCKLBW = 0x60, + X86_SSE_PUNPCKLWD = 0x61, + X86_SSE_PUNPCKLDQ = 0x62, + X86_SSE_PUNPCKLQDQ = 0x6C, + + X86_SSE_PUNPCKHBW = 0x68, + X86_SSE_PUNPCKHWD = 0x69, + X86_SSE_PUNPCKHDQ = 0x6A, + X86_SSE_PUNPCKHQDQ = 0x6D, + + X86_SSE_PADDUSB = 0xDC, + X86_SSE_PADDUSW = 0xDD, + X86_SSE_PSUBUSB = 0xD8, + X86_SSE_PSUBUSW = 0xD9, + + X86_SSE_PMULLW = 0xD5, + + X86_SSE_PSHIFTW = 0x71, + X86_SSE_SHR = 2, + X86_SSE_SAR = 4, + X86_SSE_SHL = 6, + + X86_SSE_PSRLW_REG = 0xD1, + X86_SSE_PSRAW_REG = 0xE1, + X86_SSE_PSLLW_REG = 0xF1, } X86_SSE_Opcode; @@ -1820,6 +1854,14 @@ typedef enum { x86_reg_emit ((inst), (sreg), (dreg)); \ } while (0) +#define x86_movd_xreg_reg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x6e; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + } while (0) + #define x86_pshufd_reg_reg(inst,dreg,sreg,mask) \ do { \ *(inst)++ = (unsigned char)0x66; \ @@ -1829,5 +1871,18 @@ typedef enum { *(inst)++ = (unsigned char)mask; \ } while (0) +#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \ + do { \ + x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \ + do { \ + x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \ + } while (0) + + + #endif // X86_H -- cgit v1.1 From ba0739c0dc1dd6713f6127160dcee501b105c300 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 10 Oct 2008 21:55:37 +0000 Subject: 2008-10-10 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets. svn path=/trunk/mono/; revision=115494 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 13 +++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index d90994d..0e68837 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-10 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets. 
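When the target is out of rel32 range, the new code below falls back to an indirect form: jmp [rip+0] (FF 25 00 00 00 00) immediately followed by the 8-byte absolute address, so the jump fetches its destination from the quadword inlined right behind it -- 14 bytes instead of 5, but able to reach anywhere; the size argument is unused in both branches. A sketch of the emitted layout for a far target:

    amd64_jump_code_size (code, target, 4);
    /* far case:  FF 25 00 00 00 00   jmp [rip+0]
     *            <8-byte little-endian address of target> */
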
+ 2008-10-09 Rodrigo Kumpera * x86/x86-codegen.h: Add macros for sse shift, pack, unpack, diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 3bf67c7..34eaff4 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -484,7 +484,18 @@ typedef union { } while (0) #define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) +#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) +#define amd64_jump_code_size(inst,target,size) do { \ + if (((guint64)(target) >> 32) == 0) { \ + x86_jump_code((inst),(target)); \ + } else { \ + amd64_jump_membase ((inst), AMD64_RIP, 0); \ + *(guint64*)(inst) = (guint64)(target); \ + (inst) += 8; \ + } \ +} while (0) + /* * SSE */ @@ -714,8 +725,6 @@ typedef union { #define amd64_jump8_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); } while (0) #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) -#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_jump_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_jump_code_size(inst,target,size) do { x86_jump_code((inst),(target)); } while (0) #define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) -- cgit v1.1 From 494ea4f86907f393c8f0ba660edb100a107a8c80 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 11 Oct 2008 05:26:06 +0000 Subject: 2008-10-11 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support. svn path=/trunk/mono/; revision=115509 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 0e68837..8ed3f26 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-11 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support. + 2008-10-10 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets. 
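The first cut tested the absolute target ((guint64)(target) >> 32), but a rel32 jmp encodes an offset from the instruction, so what must fit in 32 bits is the displacement; the fix below tests amd64_is_imm32 (target - inst) instead. An illustration of why that matters:

    /* both addresses sit above 4GB, yet they are only 0x1000 apart,
     * so the short 5-byte rel32 form is still correct */
    guint8 *inst   = (guint8 *)0x7f0000001000;
    guint8 *target = (guint8 *)0x7f0000002000;
    g_assert (amd64_is_imm32 ((gint64)target - (gint64)inst));
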
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 34eaff4..6ef62c5 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -487,7 +487,7 @@ typedef union { #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) #define amd64_jump_code_size(inst,target,size) do { \ - if (((guint64)(target) >> 32) == 0) { \ + if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ x86_jump_code((inst),(target)); \ } else { \ amd64_jump_membase ((inst), AMD64_RIP, 0); \ -- cgit v1.1 From 18f1e82ca6ebaf0929f654a56ab9ddfadfacacb5 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 13 Oct 2008 01:13:10 +0000 Subject: 2008-10-12 Rodrigo Kumpera * x86/x86-codegen.h: Add macro for sse41 ops. Add defined for pack ops, dword shifts/mul/pack. 2008-10-12 Rodrigo Kumpera * basic-simd.cs: Remove PackWithUnsignedSaturation tests as it turns out that the packuswb/packusdw don't work with unsigned numbers for what would be negative numbers in signed format. * cpu-x86.md: Add doubleword forms of many ops and packing ones. Fix the len of fconv_to_r8_x and xconv_r8_to_i4. * mini-ops.h: Add doubleword forms of many ops and packing ones. * mini-x86.c: Emit doubleword forms of many ops and packing ones. * simd-intrinsics.c (SimdIntrinsc): Rename the flags field to simd_version. * simd-intrinsics.c (vector4f_intrinsics): Use simd_version field for sse3 ops. * simd-intrinsics.c (vector4u_intrinsics): Rename to vector4ui_intrinsics and add more ops. * simd-intrinsics.c (simd_version_name): New function, returns the name of the version as the enum in mini.h. * simd-intrinsics.c (emit_intrinsics): Instead of having a special emit mode for sse3 ops, check the simd_version field if present. This way the code works with all versions of sse. svn path=/trunk/mono/; revision=115610 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8ed3f26..80a66ca 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-10-12 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macro for sse41 ops. + Add defined for pack ops, dword shifts/mul/pack. + 2008-10-11 Zoltan Varga * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support. 
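SSE4.1 instructions live behind the three-byte 66 0F 38 escape rather than the plain 0F map, hence the dedicated x86_sse_alu_sse41_reg_reg emitter below. A minimal sketch:

    /* pmulld %xmm1, %xmm0: 66 0F 38 40 C1 -- packed 32-bit multiply keeping
     * the low dwords; pre-SSE4.1 only the 16-bit pmullw exists */
    x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, 0, 1);
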
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ed14f58..93e3677 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1744,14 +1744,19 @@ typedef enum { X86_SSE_PUNPCKHDQ = 0x6A, X86_SSE_PUNPCKHQDQ = 0x6D, + X86_SSE_PACKUSWB = 0x67, + X86_SSE_PACKUSDW = 0x2B,/*sse41*/ + X86_SSE_PADDUSB = 0xDC, X86_SSE_PADDUSW = 0xDD, X86_SSE_PSUBUSB = 0xD8, X86_SSE_PSUBUSW = 0xD9, X86_SSE_PMULLW = 0xD5, + X86_SSE_PMULLD = 0x40,/*sse41*/ X86_SSE_PSHIFTW = 0x71, + X86_SSE_PSHIFTD = 0x72, X86_SSE_SHR = 2, X86_SSE_SAR = 4, X86_SSE_SHL = 6, @@ -1759,6 +1764,10 @@ typedef enum { X86_SSE_PSRLW_REG = 0xD1, X86_SSE_PSRAW_REG = 0xE1, X86_SSE_PSLLW_REG = 0xF1, + + X86_SSE_PSRLD_REG = 0xD2, + X86_SSE_PSRAD_REG = 0xE2, + X86_SSE_PSLLD_REG = 0xF2, } X86_SSE_Opcode; @@ -1810,6 +1819,16 @@ typedef enum { x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)0x38; \ + *(inst)++ = (unsigned char)(opc); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + + #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ do { \ *(inst)++ = (unsigned char)0x0f; \ -- cgit v1.1 From 7ed9633867d31f5dd5fd971611f952574c005a87 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 13 Oct 2008 22:13:15 +0000 Subject: 2008-10-13 Rodrigo Kumpera * x86/x86-codegen.h: Add remaining FP sse1 ops. Add sse ps encoding with imm operand. Add remaining sse1 ops. svn path=/trunk/mono/; revision=115699 --- ChangeLog | 6 ++++++ x86/x86-codegen.h | 14 ++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/ChangeLog b/ChangeLog index 80a66ca..194ceba 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2008-10-13 Rodrigo Kumpera + + * x86/x86-codegen.h: Add remaining FP sse1 ops. + Add sse ps encoding with imm operand. + Add remaining sse1 ops. + 2008-10-12 Rodrigo Kumpera * x86/x86-codegen.h: Add macro for sse41 ops. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 93e3677..5b7eaa7 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1711,12 +1711,20 @@ typedef union { typedef enum { X86_SSE_SQRT = 0x51, X86_SSE_RSQRT = 0x52, + X86_SSE_RCP = 0x53, X86_SSE_ADD = 0x58, X86_SSE_DIV = 0x5E, X86_SSE_MUL = 0x59, X86_SSE_SUB = 0x5C, X86_SSE_MIN = 0x5D, X86_SSE_MAX = 0x5F, + X86_SSE_COMP = 0xC2, + X86_SSE_AND = 0x54, + X86_SSE_ANDN = 0x55, + X86_SSE_OR = 0x56, + X86_SSE_XOR = 0x57, + X86_SSE_UNPCKL = 0x14, + X86_SSE_UNPCKH = 0x15, X86_SSE_ADDSUB = 0xD0, X86_SSE_HADD = 0x7C, @@ -1819,6 +1827,12 @@ typedef enum { x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \ + do { \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + *(inst)++ = (unsigned char)imm; \ + } while (0) + #define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x66; \ -- cgit v1.1 From ec2240eaee83b7c5ff444e0708a114458394d55b Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 14 Oct 2008 15:02:05 +0000 Subject: 2008-10-14 Rodrigo Kumpera * x86/x86-codegen.h: Add movsldup and movshdup. svn path=/trunk/mono/; revision=115785 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 194ceba..545262a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-14 Rodrigo Kumpera + + * x86/x86-codegen.h: Add movsldup and movshdup. 
+ 2008-10-13 Rodrigo Kumpera * x86/x86-codegen.h: Add remaining FP sse1 ops. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 5b7eaa7..26e7aad 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1729,6 +1729,8 @@ typedef enum { X86_SSE_ADDSUB = 0xD0, X86_SSE_HADD = 0x7C, X86_SSE_HSUB = 0x7D, + X86_SSE_MOVSHDUP = 0x16, + X86_SSE_MOVSLDUP = 0x12, X86_SSE_PAND = 0xDB, X86_SSE_POR = 0xEB, -- cgit v1.1 From 0a6e6df8d766d7ad1b21d6c234826293d1317979 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Wed, 15 Oct 2008 20:52:54 +0000 Subject: 2008-10-15 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask. svn path=/trunk/mono/; revision=115919 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 26 ++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/ChangeLog b/ChangeLog index 545262a..c7714ca 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-15 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask. + 2008-10-14 Rodrigo Kumpera * x86/x86-codegen.h: Add movsldup and movshdup. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 26e7aad..4c8bbd6 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1744,6 +1744,21 @@ typedef enum { X86_SSE_PSUBW = 0xF9, X86_SSE_PSUBD = 0xFA, + X86_SSE_PMAXSB = 0x3C, /*sse41*/ + X86_SSE_PMAXSW = 0xEE, + X86_SSE_PMAXSD = 0x3D, /*sse41*/ + + X86_SSE_PMAXUB = 0xDE, + X86_SSE_PMAXUW = 0x3E, /*sse41*/ + X86_SSE_PMAXUD = 0x3F, /*sse41*/ + + X86_SSE_PMINUB = 0xDA, + X86_SSE_PMINUW = 0x3A, /*sse41*/ + X86_SSE_PMINUD = 0x3B, /*sse41*/ + + X86_SSE_PAVGB = 0xE0, + X86_SSE_PAVGW = 0xE3, + X86_SSE_PUNPCKLBW = 0x60, X86_SSE_PUNPCKLWD = 0x61, X86_SSE_PUNPCKLDQ = 0x62, @@ -1764,6 +1779,8 @@ typedef enum { X86_SSE_PMULLW = 0xD5, X86_SSE_PMULLD = 0x40,/*sse41*/ + + X86_SSE_PMOVMSKB = 0xD7, X86_SSE_PSHIFTW = 0x71, X86_SSE_PSHIFTD = 0x72, @@ -1906,6 +1923,15 @@ typedef enum { *(inst)++ = (unsigned char)mask; \ } while (0) +#define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \ + do { \ + *(inst)++ = (unsigned char)(high_words) ? 0xF3 : 0xF2; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x70; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + *(inst)++ = (unsigned char)mask; \ + } while (0) + #define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \ do { \ x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \ -- cgit v1.1 From 8336fe34234402529da0e46af634948d678ee649 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 16 Oct 2008 23:22:27 +0000 Subject: 2008-10-16 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int compare equals and psabw. svn path=/trunk/mono/; revision=116117 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index c7714ca..1c39a4b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-10-16 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int compare equals and + psabw. + 2008-10-15 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask. 
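The "extract mask" is pmovmskb (66 0F D7), which gathers the top bit of each of the 16 bytes of an xmm register into the low 16 bits of a general-purpose destination, turning a packed compare into something an ordinary scalar branch can test. A minimal sketch reusing the pd emitter:

    x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, X86_EAX, 1); /* pmovmskb %xmm1, %eax */
    x86_test_reg_reg (code, X86_EAX, X86_EAX);                   /* any byte lane set? */
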
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 4c8bbd6..0b022e3 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1758,6 +1758,12 @@ typedef enum { X86_SSE_PAVGB = 0xE0, X86_SSE_PAVGW = 0xE3, + + X86_SSE_PCMPEQB = 0x74, + X86_SSE_PCMPEQW = 0x75, + X86_SSE_PCMPEQD = 0x76, + + X86_SSE_PSADBW = 0xf6, X86_SSE_PUNPCKLBW = 0x60, X86_SSE_PUNPCKLWD = 0x61, -- cgit v1.1 From 454b5617264c1bb64ff7296669db98a14cc58118 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 17 Oct 2008 17:41:14 +0000 Subject: 2008-10-17 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int shuffle. svn path=/trunk/mono/; revision=116265 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 1c39a4b..df80f27 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-17 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int shuffle. + 2008-10-16 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int compare equals and diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 0b022e3..1837c3a 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1765,6 +1765,8 @@ typedef enum { X86_SSE_PSADBW = 0xf6, + X86_SSE_PSHUFD = 0x70, + X86_SSE_PUNPCKLBW = 0x60, X86_SSE_PUNPCKLWD = 0x61, X86_SSE_PUNPCKLDQ = 0x62, -- cgit v1.1 From 600a42f70b41a94712aac746e44f2bba885dfc1f Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 20 Oct 2008 19:36:04 +0000 Subject: 2008-10-20 Rodrigo Kumpera * x86/x86-codegen.h: Add multiply and store high. svn path=/trunk/mono/; revision=116545 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index df80f27..32dd978 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add multiply and store high. + 2008-10-17 Rodrigo Kumpera * x86/x86-codegen.h: Add packed int shuffle. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 1837c3a..adce9f4 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1787,6 +1787,7 @@ typedef enum { X86_SSE_PMULLW = 0xD5, X86_SSE_PMULLD = 0x40,/*sse41*/ + X86_SSE_PMULHUW = 0xE4, X86_SSE_PMOVMSKB = 0xD7, -- cgit v1.1 From 7a2889c2ce0cfbc193324b64764a02e42f5daee8 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 24 Oct 2008 00:35:54 +0000 Subject: 2008-10-23 Rodrigo Kumpera * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation and compare greater. svn path=/trunk/mono/; revision=116896 --- ChangeLog | 5 + x86/.deps/tramp.Plo | 1 + x86/Makefile | 507 ++++++++++++++ x86/Makefile.in | 506 ++++++++++++++ x86/d | 103 +++ x86/x86-codegen.h | 15 +- x86/x86-codegen.h~ | 1905 +++++++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 3041 insertions(+), 1 deletion(-) create mode 100644 x86/.deps/tramp.Plo create mode 100644 x86/Makefile create mode 100644 x86/Makefile.in create mode 100644 x86/d create mode 100644 x86/x86-codegen.h~ diff --git a/ChangeLog b/ChangeLog index 32dd978..9db33d3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-10-23 Rodrigo Kumpera + + * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation + and compare greater. + 2008-10-20 Rodrigo Kumpera * x86/x86-codegen.h: Add multiply and store high. 
diff --git a/x86/.deps/tramp.Plo b/x86/.deps/tramp.Plo new file mode 100644 index 0000000..9ce06a8 --- /dev/null +++ b/x86/.deps/tramp.Plo @@ -0,0 +1 @@ +# dummy diff --git a/x86/Makefile b/x86/Makefile new file mode 100644 index 0000000..b892bd9 --- /dev/null +++ b/x86/Makefile @@ -0,0 +1,507 @@ +# Postprocessed with patch-quiet.sh +# Makefile.in generated by automake 1.10 from Makefile.am. +# mono/arch/x86/Makefile. Generated from Makefile.in by configure. + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + + + + +pkgdatadir = $(datadir)/mono +pkglibdir = $(libdir)/mono +pkgincludedir = $(includedir)/mono +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = i686-suse-linux-gnu +host_triplet = i686-suse-linux-gnu +target_triplet = i686-suse-linux-gnu +subdir = mono/arch/x86 +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/nls.m4 $(top_srcdir)/po.m4 \ + $(top_srcdir)/progtest.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.in +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libmonoarch_x86_la_LIBADD = +am__libmonoarch_x86_la_SOURCES_DIST = tramp.c x86-codegen.h +#am_libmonoarch_x86_la_OBJECTS = tramp.lo +libmonoarch_x86_la_OBJECTS = $(am_libmonoarch_x86_la_OBJECTS) +#am_libmonoarch_x86_la_rpath = +DEFAULT_INCLUDES = -I. 
-I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles quiet +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) $(if $(V),,--quiet) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libmonoarch_x86_la_SOURCES) +DIST_SOURCES = $(am__libmonoarch_x86_la_SOURCES_DIST) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = ${SHELL} /home/rodrigo/repo/mono/missing --run aclocal-1.10 +AMTAR = ${SHELL} /home/rodrigo/repo/mono/missing --run tar +API_VER = 1.0 +AR = ar +AS = as +AUTOCONF = ${SHELL} /home/rodrigo/repo/mono/missing --run autoconf +AUTOHEADER = ${SHELL} /home/rodrigo/repo/mono/missing --run autoheader +AUTOMAKE = ${SHELL} /home/rodrigo/repo/mono/missing --run automake-1.10 +AWK = gawk +BASE_DEPENDENCIES_CFLAGS = -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include +BASE_DEPENDENCIES_LIBS = -lglib-2.0 +BISON = yes +BUILD_EXEEXT = +BUILD_GLIB_CFLAGS = -pthread -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include +BUILD_GLIB_LIBS = -pthread -lgthread-2.0 -lrt -lglib-2.0 +CC = gcc +CCAS = gcc +CCASDEPMODE = depmode=gcc3 +CCASFLAGS = -g -O2 +CCDEPMODE = depmode=gcc3 +CC_FOR_BUILD = gcc +CFLAGS = -g -O2 -fno-strict-aliasing -Wdeclaration-after-statement -g -Wall -Wunused -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wno-cast-qual -Wcast-align -Wwrite-strings -mno-tls-direct-seg-refs +CFLAGS_FOR_BUILD = -g -O2 +CPP = gcc -E +CPPFLAGS = -DGC_LINUX_THREADS -D_GNU_SOURCE -D_REENTRANT -DUSE_MMAP -DUSE_MUNMAP -D_FILE_OFFSET_BITS=64 -DUSE_COMPILER_TLS +CXX = g++ +CXXCPP = g++ -E +CXXDEPMODE = depmode=gcc3 +CXXFLAGS = -g -O2 +CYGPATH_W = echo +DEFS = -DHAVE_CONFIG_H +DEPDIR = .deps +DISABLE_SHARED_HANDLES = +DLLTOOL = dlltool +DOLT_BASH = /bin/bash +DTRACE = +DTRACEFLAGS = +ECHO = echo +ECHO_C = +ECHO_N = -n +ECHO_T = +EGREP = /usr/bin/grep -E +EXEEXT = +F77 = +FFLAGS = +GETTEXT_MACRO_VERSION = 0.17 +GLIB_CFLAGS = -pthread -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include +GLIB_LIBS = -pthread -lgthread-2.0 -lrt -lglib-2.0 +GMODULE_CFLAGS = -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include +GMODULE_LIBS = -Wl,--export-dynamic -lgmodule-2.0 -ldl -lglib-2.0 +GMSGFMT = /usr/bin/msgfmt +GMSGFMT_015 = /usr/bin/msgfmt +GREP = /usr/bin/grep +HAVE_BOEHM_GC = +HOST_CC = +INSTALL = /usr/bin/install -c +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_PROGRAM = ${INSTALL} +INSTALL_SCRIPT = ${INSTALL} +INSTALL_STRIP_PROGRAM = $(install_sh) -c -s +INTL = libc.so.6 +LDFLAGS = +LIBC = libc.so.6 +LIBGC_CFLAGS = -I$(top_srcdir)/libgc/include +LIBGC_LIBS = $(top_builddir)/libgc/libmonogc.la +LIBGC_STATIC_LIBS = $(top_builddir)/libgc/libmonogc-static.la +LIBOBJS = +LIBS = -lrt -ldl -lpthread -lm +LIBTOOL = $(top_builddir)/doltlibtool +LN_S = ln -s +LTCOMPILE = $(top_builddir)/doltcompile $(COMPILE) +LTCXXCOMPILE = $(top_builddir)/doltcompile $(CXXCOMPILE) +LTLIBOBJS = +MAINT = +MAKEINFO = ${SHELL} /home/rodrigo/repo/mono/missing --run makeinfo +MKDIR_P = /bin/mkdir -p +MONO_DL_NEED_USCORE = 0 +MSGFMT = /usr/bin/msgfmt +MSGFMT_015 = /usr/bin/msgfmt +MSGMERGE = /usr/bin/msgmerge +OBJDUMP = objdump +OBJEXT = o +OPROFILE_CFLAGS = +OPROFILE_LIBS = +PACKAGE = mono +PACKAGE_BUGREPORT = +PACKAGE_NAME = +PACKAGE_STRING = +PACKAGE_TARNAME = +PACKAGE_VERSION = +PATH_SEPARATOR = : +PKG_CONFIG = 
/usr/bin/pkg-config +RANLIB = ranlib +SED = /usr/bin/sed +SET_MAKE = +SHELL = /bin/sh +SQLITE = libsqlite.so.0 +SQLITE3 = libsqlite3.so.0 +STRIP = strip +USE_NLS = yes +VERSION = 2.1 +X11 = libX11.so.6 +XATTR_LIB = +XGETTEXT = /usr/bin/xgettext +XGETTEXT_015 = /usr/bin/xgettext +XGETTEXT_EXTRA_OPTIONS = +XMKMF = +abs_builddir = /home/rodrigo/repo/mono/mono/arch/x86 +abs_srcdir = /home/rodrigo/repo/mono/mono/arch/x86 +abs_top_builddir = /home/rodrigo/repo/mono +abs_top_srcdir = /home/rodrigo/repo/mono +ac_ct_CC = gcc +ac_ct_CXX = g++ +ac_ct_F77 = +am__include = include +am__leading_dot = . +am__quote = +am__tar = tar --format=ustar -chf - "$$tardir" +am__untar = tar -xf - +arch_target = x86 +bindir = ${exec_prefix}/bin +build = i686-suse-linux-gnu +build_alias = +build_cpu = i686 +build_os = linux-gnu +build_vendor = suse +builddir = . +datadir = ${datarootdir} +datarootdir = ${prefix}/share +docdir = ${datarootdir}/doc/${PACKAGE} +dvidir = ${docdir} +eglib_dir = +exec_prefix = ${prefix} +export_ldflags = -Wl,--export-dynamic +host = i686-suse-linux-gnu +host_alias = +host_cpu = i686 +host_os = linux-gnu +host_vendor = suse +htmldir = ${docdir} +ikvm_native_dir = ikvm-native +includedir = ${prefix}/include +infodir = ${datarootdir}/info +install_sh = $(SHELL) /home/rodrigo/repo/mono/install-sh +libdir = ${exec_prefix}/lib +libexecdir = ${exec_prefix}/libexec +libgc_dir = libgc +libgdiplus_loc = +libmono_cflags = -D_REENTRANT +libmono_ldflags = -lpthread -lrt -ldl -lpthread -lm +libsuffix = .so +localedir = ${datarootdir}/locale +localstatedir = ${prefix}/var +mandir = ${datarootdir}/man +mcs_topdir = $(top_srcdir)/../mcs +mcs_topdir_from_srcdir = $(top_builddir)/../mcs +mkdir_p = /bin/mkdir -p +mono_build_root = /home/rodrigo/repo/mono +mono_cfg_dir = /home/rodrigo/repo/mono/runtime/etc +mono_runtime = mono/mini/mono +oldincludedir = /usr/include +pdfdir = ${docdir} +prefix = /home/rodrigo/repo/install/ +program_transform_name = s,x,x, +psdir = ${docdir} +reloc_libdir = lib +sbindir = ${exec_prefix}/sbin +sharedstatedir = ${prefix}/com +srcdir = . +subdirs = libgc +sysconfdir = ${prefix}/etc +target = i686-suse-linux-gnu +target_alias = +target_cpu = i686 +target_os = linux-gnu +target_vendor = suse +top_builddir = ../../.. +top_srcdir = ../../.. +#INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +#noinst_LTLIBRARIES = libmonoarch-x86.la +#libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign mono/arch/x86/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign mono/arch/x86/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libmonoarch-x86.la: $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_DEPENDENCIES) + $(if $(V),,@echo -e "LD\t$@";) $(LINK) $(am_libmonoarch_x86_la_rpath) $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +include ./$(DEPDIR)/tramp.Plo + +.c.o: + $(if $(V),,@echo -e "CC\t$@";) $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< + $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +# source='$<' object='$@' libtool=no \ +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ +# $(COMPILE) -c $< + +.c.obj: + $(if $(V),,@echo -e "CC\t$@";) $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` + $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +# source='$<' object='$@' libtool=no \ +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ +# $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: + $(if $(V),,@echo -e "CC\t$@";) $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< + $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +# source='$<' object='$@' libtool=yes \ +# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ +# $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + 
+GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/x86/Makefile.in b/x86/Makefile.in new file mode 100644 index 0000000..ef5a245 --- /dev/null +++ b/x86/Makefile.in @@ -0,0 +1,506 @@ +# Makefile.in generated by automake 1.10 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = mono/arch/x86 +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/nls.m4 $(top_srcdir)/po.m4 \ + $(top_srcdir)/progtest.m4 $(top_srcdir)/acinclude.m4 \ + $(top_srcdir)/configure.in +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libmonoarch_x86_la_LIBADD = +am__libmonoarch_x86_la_SOURCES_DIST = tramp.c x86-codegen.h +@INTERP_SUPPORTED_TRUE@am_libmonoarch_x86_la_OBJECTS = tramp.lo +libmonoarch_x86_la_OBJECTS = $(am_libmonoarch_x86_la_OBJECTS) +@INTERP_SUPPORTED_TRUE@am_libmonoarch_x86_la_rpath = +DEFAULT_INCLUDES = -I. -I$(top_builddir)@am__isrc@ +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libmonoarch_x86_la_SOURCES) +DIST_SOURCES = $(am__libmonoarch_x86_la_SOURCES_DIST) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +API_VER = @API_VER@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BASE_DEPENDENCIES_CFLAGS = @BASE_DEPENDENCIES_CFLAGS@ +BASE_DEPENDENCIES_LIBS = @BASE_DEPENDENCIES_LIBS@ +BISON = @BISON@ +BUILD_EXEEXT = @BUILD_EXEEXT@ +BUILD_GLIB_CFLAGS = @BUILD_GLIB_CFLAGS@ +BUILD_GLIB_LIBS = @BUILD_GLIB_LIBS@ +CC = @CC@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CC_FOR_BUILD = @CC_FOR_BUILD@ +CFLAGS = @CFLAGS@ +CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DISABLE_SHARED_HANDLES = @DISABLE_SHARED_HANDLES@ +DLLTOOL = @DLLTOOL@ +DOLT_BASH = @DOLT_BASH@ +DTRACE = @DTRACE@ +DTRACEFLAGS = @DTRACEFLAGS@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FFLAGS = @FFLAGS@ +GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_LIBS = @GLIB_LIBS@ +GMODULE_CFLAGS = @GMODULE_CFLAGS@ +GMODULE_LIBS = @GMODULE_LIBS@ +GMSGFMT = @GMSGFMT@ +GMSGFMT_015 = @GMSGFMT_015@ +GREP = @GREP@ +HAVE_BOEHM_GC = @HAVE_BOEHM_GC@ +HOST_CC = @HOST_CC@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTL = @INTL@ +LDFLAGS = @LDFLAGS@ +LIBC = @LIBC@ +LIBGC_CFLAGS = 
@LIBGC_CFLAGS@ +LIBGC_LIBS = @LIBGC_LIBS@ +LIBGC_STATIC_LIBS = @LIBGC_STATIC_LIBS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LN_S = @LN_S@ +LTCOMPILE = @LTCOMPILE@ +LTCXXCOMPILE = @LTCXXCOMPILE@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MONO_DL_NEED_USCORE = @MONO_DL_NEED_USCORE@ +MSGFMT = @MSGFMT@ +MSGFMT_015 = @MSGFMT_015@ +MSGMERGE = @MSGMERGE@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPROFILE_CFLAGS = @OPROFILE_CFLAGS@ +OPROFILE_LIBS = @OPROFILE_LIBS@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SQLITE = @SQLITE@ +SQLITE3 = @SQLITE3@ +STRIP = @STRIP@ +USE_NLS = @USE_NLS@ +VERSION = @VERSION@ +X11 = @X11@ +XATTR_LIB = @XATTR_LIB@ +XGETTEXT = @XGETTEXT@ +XGETTEXT_015 = @XGETTEXT_015@ +XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ +XMKMF = @XMKMF@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arch_target = @arch_target@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +eglib_dir = @eglib_dir@ +exec_prefix = @exec_prefix@ +export_ldflags = @export_ldflags@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ikvm_native_dir = @ikvm_native_dir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgc_dir = @libgc_dir@ +libgdiplus_loc = @libgdiplus_loc@ +libmono_cflags = @libmono_cflags@ +libmono_ldflags = @libmono_ldflags@ +libsuffix = @libsuffix@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mcs_topdir = @mcs_topdir@ +mcs_topdir_from_srcdir = @mcs_topdir_from_srcdir@ +mkdir_p = @mkdir_p@ +mono_build_root = @mono_build_root@ +mono_cfg_dir = @mono_cfg_dir@ +mono_runtime = @mono_runtime@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +reloc_libdir = @reloc_libdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +subdirs = @subdirs@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +@INTERP_SUPPORTED_TRUE@INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +@INTERP_SUPPORTED_TRUE@noinst_LTLIBRARIES = libmonoarch-x86.la +@INTERP_SUPPORTED_TRUE@libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ 
+ esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign mono/arch/x86/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign mono/arch/x86/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libmonoarch-x86.la: $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_DEPENDENCIES) + $(LINK) $(am_libmonoarch_x86_la_rpath) $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tramp.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + if test -z 
"$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) ' { files[$$0] = 1; } \ + END { for (i in files) print i; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/x86/d b/x86/d new file mode 100644 index 0000000..2d34df2 --- /dev/null +++ b/x86/d @@ -0,0 +1,103 @@ +diff --cc mono/arch/x86/x86-codegen.h +index 59a230f,5333488..0000000 +--- a/mono/arch/x86/x86-codegen.h ++++ b/mono/arch/x86/x86-codegen.h +@@@ -1745,45 -1745,38 +1745,7 @@@ typedef enum + *(inst)++ = (unsigned char)0x2c; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) +-- +- #define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ +- do { \ +- *(inst)++ = (unsigned char)0x0F; \ +- *(inst)++ = (unsigned char)(opc); \ +- x86_reg_emit ((inst), (dreg), (reg)); \ +- } while (0) +- +- #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ +- do { \ +- *(inst)++ = (unsigned char)0x66; \ +- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ +- } while (0) +- +--#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ +-- do { \ +- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ +- } while (0) +- +- #define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ +- do { \ +- *(inst)++ = (unsigned char)0xF2; \ +- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ +- } while (0) +- +- #define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ +- do { \ +- *(inst)++ = (unsigned char)0xF3; \ +- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ +- } while (0) +- +- #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ +- do { \ +- *(inst)++ = (unsigned char)0x0f; \ +- *(inst)++ = (unsigned char)0x10; \ +- x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ +- } while (0) +- +- #define x86_movups_membase_reg(inst,basereg,disp,reg) \ + - *(inst)++ = (unsigned char)0x0F; \ + - *(inst)++ = (unsigned char)(opc); \ + - x86_reg_emit ((inst), (dreg), (reg)); \ + - } while (0) + - + -#define x86_sse3_alu_ps_reg_reg(inst,opc,dreg,reg) \ + - do { \ + - 
*(inst)++ = (unsigned char)0xF2; \ + - *(inst)++ = (unsigned char)0x0F; \ + - *(inst)++ = (unsigned char)(opc); \ + - x86_reg_emit ((inst), (dreg), (reg)); \ + - } while (0) + - + -#define x86_sse_alu_i_reg_reg(inst,opc,dreg,reg) \ + - do { \ + - *(inst)++ = (unsigned char)0x66; \ + - *(inst)++ = (unsigned char)0x0F; \ + - *(inst)++ = (unsigned char)(opc); \ + - x86_reg_emit ((inst), (dreg), (reg)); \ + - } while (0) + - + -#define x86_movups_reg_membase(inst,sreg,basereg,disp) \ + - do { \ + - *(inst)++ = (unsigned char)0x0f; \ + - *(inst)++ = (unsigned char)0x10; \ + - x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + - } while (0) + - + -#define x86_movups_membase_reg(inst,basereg,disp,reg) \ +++e_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ +@@@ -1804,14 -1797,15 +1766,6 @@@ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +- #define x86_movaps_reg_reg(inst,dreg,sreg) \ +- do { \ +- *(inst)++ = (unsigned char)0x0f; \ +- *(inst)++ = (unsigned char)0x28; \ +- x86_reg_emit ((inst), (dreg), (sreg)); \ +- } while (0) +-- +- + -#define x86_movups_reg_reg(inst,dreg,sreg) \ + - do { \ + - *(inst)++ = (unsigned char)0x0f; \ + - *(inst)++ = (unsigned char)0x10; \ + - x86_reg_emit ((inst), (dreg), (sreg)); \ + - } while (0) + - + - + #define x86_movd_reg_xreg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index adce9f4..26c03a3 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1752,6 +1752,10 @@ typedef enum { X86_SSE_PMAXUW = 0x3E, /*sse41*/ X86_SSE_PMAXUD = 0x3F, /*sse41*/ + X86_SSE_PMINSB = 0x38, /*sse41*/ + X86_SSE_PMINSW = 0xEA, + X86_SSE_PMINSD = 0x39, /*sse41*/ + X86_SSE_PMINUB = 0xDA, X86_SSE_PMINUW = 0x3A, /*sse41*/ X86_SSE_PMINUD = 0x3B, /*sse41*/ @@ -1762,7 +1766,11 @@ typedef enum { X86_SSE_PCMPEQB = 0x74, X86_SSE_PCMPEQW = 0x75, X86_SSE_PCMPEQD = 0x76, - + + X86_SSE_PCMPGTB = 0x64, + X86_SSE_PCMPGTW = 0x65, + X86_SSE_PCMPGTD = 0x66, + X86_SSE_PSADBW = 0xf6, X86_SSE_PSHUFD = 0x70, @@ -1785,6 +1793,11 @@ typedef enum { X86_SSE_PSUBUSB = 0xD8, X86_SSE_PSUBUSW = 0xD9, + X86_SSE_PADDSB = 0xEC, + X86_SSE_PADDSW = 0xED, + X86_SSE_PSUBSB = 0xE8, + X86_SSE_PSUBSW = 0xE9, + X86_SSE_PMULLW = 0xD5, X86_SSE_PMULLD = 0x40,/*sse41*/ X86_SSE_PMULHUW = 0xE4, diff --git a/x86/x86-codegen.h~ b/x86/x86-codegen.h~ new file mode 100644 index 0000000..a51c0fe --- /dev/null +++ b/x86/x86-codegen.h~ @@ -0,0 +1,1905 @@ +/* + * x86-codegen.h: Macros for generating x86 code + * + * Authors: + * Paolo Molaro (lupus@ximian.com) + * Intel Corporation (ORP Project) + * Sergey Chaban (serge@wildwestsoftware.com) + * Dietmar Maurer (dietmar@ximian.com) + * Patrik Torstensson + * + * Copyright (C) 2000 Intel Corporation. All rights reserved. + * Copyright (C) 2001, 2002 Ximian, Inc. 
+ */ + +#ifndef X86_H +#define X86_H +#include <assert.h> +/* +// x86 register numbers +*/ +typedef enum { + X86_EAX = 0, + X86_ECX = 1, + X86_EDX = 2, + X86_EBX = 3, + X86_ESP = 4, + X86_EBP = 5, + X86_ESI = 6, + X86_EDI = 7, + X86_NREG +} X86_Reg_No; + +typedef enum { + X86_XMM0, + X86_XMM1, + X86_XMM2, + X86_XMM3, + X86_XMM4, + X86_XMM5, + X86_XMM6, + X86_XMM7, + X86_XMM_NREG +} X86_XMM_Reg_No; + +/* +// opcodes for alu instructions +*/ +typedef enum { + X86_ADD = 0, + X86_OR = 1, + X86_ADC = 2, + X86_SBB = 3, + X86_AND = 4, + X86_SUB = 5, + X86_XOR = 6, + X86_CMP = 7, + X86_NALU +} X86_ALU_Opcode; +/* +// opcodes for shift instructions +*/ +typedef enum { + X86_SHLD, + X86_SHLR, + X86_ROL = 0, + X86_ROR = 1, + X86_RCL = 2, + X86_RCR = 3, + X86_SHL = 4, + X86_SHR = 5, + X86_SAR = 7, + X86_NSHIFT = 8 +} X86_Shift_Opcode; +/* +// opcodes for floating-point instructions +*/ +typedef enum { + X86_FADD = 0, + X86_FMUL = 1, + X86_FCOM = 2, + X86_FCOMP = 3, + X86_FSUB = 4, + X86_FSUBR = 5, + X86_FDIV = 6, + X86_FDIVR = 7, + X86_NFP = 8 +} X86_FP_Opcode; +/* +// integer condition codes +*/ +typedef enum { + X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0, + X86_CC_NE = 1, X86_CC_NZ = 1, + X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2, + X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3, + X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4, + X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5, + X86_CC_LZ = 6, X86_CC_S = 6, + X86_CC_GEZ = 7, X86_CC_NS = 7, + X86_CC_P = 8, X86_CC_PE = 8, + X86_CC_NP = 9, X86_CC_PO = 9, + X86_CC_O = 10, + X86_CC_NO = 11, + X86_NCC +} X86_CC; + +/* FP status */ +enum { + X86_FP_C0 = 0x100, + X86_FP_C1 = 0x200, + X86_FP_C2 = 0x400, + X86_FP_C3 = 0x4000, + X86_FP_CC_MASK = 0x4500 +}; + +/* FP control word */ +enum { + X86_FPCW_INVOPEX_MASK = 0x1, + X86_FPCW_DENOPEX_MASK = 0x2, + X86_FPCW_ZERODIV_MASK = 0x4, + X86_FPCW_OVFEX_MASK = 0x8, + X86_FPCW_UNDFEX_MASK = 0x10, + X86_FPCW_PRECEX_MASK = 0x20, + X86_FPCW_PRECC_MASK = 0x300, + X86_FPCW_ROUNDC_MASK = 0xc00, + + /* values for precision control */ + X86_FPCW_PREC_SINGLE = 0, + X86_FPCW_PREC_DOUBLE = 0x200, + X86_FPCW_PREC_EXTENDED = 0x300, + + /* values for rounding control */ + X86_FPCW_ROUND_NEAREST = 0, + X86_FPCW_ROUND_DOWN = 0x400, + X86_FPCW_ROUND_UP = 0x800, + X86_FPCW_ROUND_TOZERO = 0xc00 +}; + +/* +// prefix code +*/ +typedef enum { + X86_LOCK_PREFIX = 0xF0, + X86_REPNZ_PREFIX = 0xF2, + X86_REPZ_PREFIX = 0xF3, + X86_REP_PREFIX = 0xF3, + X86_CS_PREFIX = 0x2E, + X86_SS_PREFIX = 0x36, + X86_DS_PREFIX = 0x3E, + X86_ES_PREFIX = 0x26, + X86_FS_PREFIX = 0x64, + X86_GS_PREFIX = 0x65, + X86_UNLIKELY_PREFIX = 0x2E, + X86_LIKELY_PREFIX = 0x3E, + X86_OPERAND_PREFIX = 0x66, + X86_ADDRESS_PREFIX = 0x67 +} X86_Prefix; + +static const unsigned char +x86_cc_unsigned_map [X86_NCC] = { + 0x74, /* eq */ + 0x75, /* ne */ + 0x72, /* lt */ + 0x76, /* le */ + 0x77, /* gt */ + 0x73, /* ge */ + 0x78, /* lz */ + 0x79, /* gez */ + 0x7a, /* p */ + 0x7b, /* np */ + 0x70, /* o */ + 0x71, /* no */ +}; + +static const unsigned char +x86_cc_signed_map [X86_NCC] = { + 0x74, /* eq */ + 0x75, /* ne */ + 0x7c, /* lt */ + 0x7e, /* le */ + 0x7f, /* gt */ + 0x7d, /* ge */ + 0x78, /* lz */ + 0x79, /* gez */ + 0x7a, /* p */ + 0x7b, /* np */ + 0x70, /* o */ + 0x71, /* no */ +}; + +typedef union { + int val; + unsigned char b [4]; +} x86_imm_buf; + +#define X86_NOBASEREG (-1) + +/* +// bitvector mask for callee-saved registers +*/ +#define X86_ESI_MASK (1<<X86_ESI) +#define X86_EDI_MASK (1<<X86_EDI) +#define X86_EBX_MASK (1<<X86_EBX) +#define X86_EBP_MASK (1<<X86_EBP) + +#define X86_CALLEE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX)) +#define X86_CALLER_REGS ((1<<X86_EBX) | (1<<X86_EBP) | (1<<X86_ESI) | (1<<X86_EDI)) +#define X86_BYTE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX) | (1<<X86_EBX)) + +#define X86_IS_SCRATCH(reg) (X86_CALLEE_REGS & (1 << (reg))) /* X86_EAX, X86_ECX, or X86_EDX */ +#define X86_IS_CALLEE(reg) (X86_CALLER_REGS & (1 << (reg))) /* X86_ESI, X86_EDI, X86_EBX, or X86_EBP */ + +#define X86_IS_BYTE_REG(reg) ((reg) < 4) + +/* + * useful building blocks + */ +#define x86_modrm_mod(modrm) ((modrm) >> 6) +#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7) +#define x86_modrm_rm(modrm) ((modrm) & 0x7) + 
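+/* + * As a rough sketch of how the pieces below compose (the buffer name and + * size here are just for illustration, not part of the macro API): a ModR/M + * byte packs mod (2 bits), reg (3 bits) and r/m (3 bits), so a + * register-to-register mov is the opcode 0x8b followed by mod=3, reg=dest, + * rm=source: + * + *   unsigned char buf [16], *p = buf; + *   x86_mov_reg_reg (p, X86_EAX, X86_ECX, 4); + *   // buf now holds 8b c1: 0xc1 == (3 << 6) | (X86_EAX << 3) | X86_ECX + */ + 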
+#define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0) +#define x86_imm_emit32(inst,imm) \ + do { \ + x86_imm_buf imb; imb.val = (int) (imm); \ + *(inst)++ = imb.b [0]; \ + *(inst)++ = imb.b [1]; \ + *(inst)++ = imb.b [2]; \ + *(inst)++ = imb.b [3]; \ + } while (0) +#define x86_imm_emit16(inst,imm) do { *(short*)(inst) = (imm); (inst) += 2; } while (0) +#define x86_imm_emit8(inst,imm) do { *(inst) = (unsigned char)((imm) & 0xff); ++(inst); } while (0) +#define x86_is_imm8(imm) (((int)(imm) >= -128 && (int)(imm) <= 127)) +#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1))) + +#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0) +#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0) +#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0) +#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) + +#define x86_membase_emit(inst,r,basereg,disp) do {\ + if ((basereg) == X86_ESP) { \ + if ((disp) == 0) { \ + x86_address_byte ((inst), 0, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + } else if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), X86_ESP); \ + x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + break; \ + } \ + if ((disp) == 0 && (basereg) != X86_EBP) { \ + x86_address_byte ((inst), 0, (r), (basereg)); \ + break; \ + } \ + if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), (basereg)); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), (basereg)); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + } while (0) + +#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ + do { \ + if ((basereg) == X86_NOBASEREG) { \ + x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } else if ((disp) == 0 && (basereg) != X86_EBP) { \ + x86_address_byte ((inst), 0, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + } else if (x86_is_imm8((disp))) { \ + x86_address_byte ((inst), 1, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ + x86_imm_emit8 ((inst), (disp)); \ + } else { \ + x86_address_byte ((inst), 2, (r), 4); \ + x86_address_byte ((inst), (shift), (indexreg), 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + } while (0) + +/* + * target is the position in the code where to jump to: + * target = code; + * .. output loop code... + * x86_mov_reg_imm (code, X86_EAX, 0); + * loop = code; + * x86_loop (code, -1); + * ... finish method + * + * patch displacement + * x86_patch (loop, target); + * + * ins should point at the start of the instruction that encodes a target. + * the instruction is inspected for validity and the correct displacement + * is inserted. 
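+ * + * A forward branch can be handled the same way (a small sketch, with the + * surrounding code omitted): emit the branch with a dummy displacement, + * remember its address, then patch it once the target is known: + * + *   unsigned char *pos = code; + *   x86_branch32 (code, X86_CC_EQ, 0, 0); + *   ... emit the code the branch should skip ... + *   x86_patch (pos, code); 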
+ */ +#define x86_patch(ins,target) \ + do { \ + unsigned char* pos = (ins) + 1; \ + int disp, size = 0; \ + switch (*(unsigned char*)(ins)) { \ + case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \ + case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \ + ++size; ++pos; break; /* prefix for 32-bit disp */ \ + case 0xe0: case 0xe1: case 0xe2: /* loop */ \ + case 0xeb: /* jump8 */ \ + /* conditional jump opcodes */ \ + case 0x70: case 0x71: case 0x72: case 0x73: \ + case 0x74: case 0x75: case 0x76: case 0x77: \ + case 0x78: case 0x79: case 0x7a: case 0x7b: \ + case 0x7c: case 0x7d: case 0x7e: case 0x7f: \ + break; \ + default: assert (0); \ + } \ + disp = (target) - pos; \ + if (size) x86_imm_emit32 (pos, disp - 4); \ + else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \ + else assert (0); \ + } while (0) + +#define x86_breakpoint(inst) \ + do { \ + *(inst)++ = 0xcc; \ + } while (0) + +#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0) +#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0) +#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0) +#define x86_stosd(inst) x86_stosl((inst)) +#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0) +#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) +#define x86_movsd(inst) x86_movsl((inst)) + +#define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) + +#define x86_rdtsc(inst) \ + do { \ + *(inst)++ = 0x0f; \ + *(inst)++ = 0x31; \ + } while (0) + +#define x86_cmpxchg_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_cmpxchg_mem_reg(inst,mem,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xb1; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_xchg_reg_reg(inst,dreg,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_xchg_mem_reg(inst,mem,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0x86; \ + else \ + *(inst)++ = (unsigned char)0x87; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_xadd_reg_reg(inst,dreg,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_xadd_mem_reg(inst,mem,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + if ((size) == 1) \ + *(inst)++ = (unsigned char)0xC0; \ + else \ + *(inst)++ = (unsigned char)0xC1; \ + 
x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_inc_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_inc_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0) + +#define x86_dec_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 1, (mem)); \ + } while (0) + +#define x86_dec_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 1, (basereg), (disp)); \ + } while (0) + +#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0) + +#define x86_not_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 2, (mem)); \ + } while (0) + +#define x86_not_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } while (0) + +#define x86_not_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 2, (reg)); \ + } while (0) + +#define x86_neg_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 3, (mem)); \ + } while (0) + +#define x86_neg_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 3, (basereg), (disp)); \ + } while (0) + +#define x86_neg_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 3, (reg)); \ + } while (0) + +#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0) + +#define x86_alu_reg_imm(inst,opc,reg,imm) \ + do { \ + if ((reg) == X86_EAX) { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ + x86_imm_emit32 ((inst), (imm)); \ + break; \ + } \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_mem_imm(inst,opc,mem,imm) \ + do { \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_mem_emit ((inst), (opc), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_mem_emit ((inst), (opc), (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \ + do { \ + if (x86_is_imm8((imm))) { \ + *(inst)++ = (unsigned char)0x83; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x81; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \ + do { \ + *(inst)++ = (unsigned char)0x80; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_alu_mem_reg(inst,opc,mem,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define 
x86_alu_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +/** + * @x86_alu_reg8_reg8: + * Supports ALU operations between two 8-bit registers. + * dreg := dreg opc reg + * X86_Reg_No enum is used to specify the registers. + * Additionally is_*_h flags are used to specify what part + * of a given 32-bit register is used - high (TRUE) or low (FALSE). + * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH + */ +#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \ + x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \ + } while (0) + +#define x86_alu_reg_mem(inst,opc,reg,mem) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \ + do { \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_test_reg_imm(inst,reg,imm) \ + do { \ + if ((reg) == X86_EAX) { \ + *(inst)++ = (unsigned char)0xa9; \ + } else { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 0, (reg)); \ + } \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_mem_imm(inst,mem,imm) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_membase_imm(inst,basereg,disp,imm) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_test_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_test_mem_reg(inst,mem,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_test_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x85; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_shift_reg_imm(inst,opc,reg,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_reg_emit ((inst), (opc), (reg)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_mem_imm(inst,opc,mem,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_mem_emit ((inst), (opc), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \ + do { \ + if ((imm) == 1) { \ + *(inst)++ = (unsigned char)0xd1; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xc1; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_shift_reg(inst,opc,reg) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_reg_emit ((inst), (opc), (reg)); \ + } while (0) + +#define x86_shift_mem(inst,opc,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } while (0) + +#define x86_shift_membase(inst,opc,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd3; \ + x86_membase_emit ((inst), 
(opc), (basereg), (disp)); \ + } while (0) + +/* + * Multi op shift missing. + */ + +#define x86_shrd_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xad; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xac; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + x86_imm_emit8 ((inst), (shamt)); \ + } while (0) + +#define x86_shld_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xa5; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + } while (0) + +#define x86_shld_reg_imm(inst,dreg,reg,shamt) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xa4; \ + x86_reg_emit ((inst), (reg), (dreg)); \ + x86_imm_emit8 ((inst), (shamt)); \ + } while (0) + +/* + * EDX:EAX = EAX * rm + */ +#define x86_mul_reg(inst,reg,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \ + } while (0) + +#define x86_mul_mem(inst,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_mul_membase(inst,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \ + } while (0) + +/* + * r *= rm + */ +#define x86_imul_reg_reg(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_imul_reg_mem(inst,reg,mem) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_imul_reg_membase(inst,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0xaf; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +/* + * dreg = rm * imm + */ +#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_mem_emit ((inst), (reg), (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_mem_emit ((inst), (reg), (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \ + do { \ + if (x86_is_imm8 ((imm))) { \ + *(inst)++ = (unsigned char)0x6b; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x69; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +/* + * divide EDX:EAX by rm; + * eax = quotient, edx = remainder + */ + +#define x86_div_reg(inst,reg,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_reg_emit ((inst), 6 + ((is_signed) ? 
1 : 0), (reg)); \ + } while (0) + +#define x86_div_mem(inst,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_div_membase(inst,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0xf7; \ + x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \ + } while (0) + +#define x86_mov_mem_reg(inst,mem,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_mov_regp_reg(inst,regp,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_regp_emit ((inst), (reg), (regp)); \ + } while (0) + +#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + +#define x86_mov_reg_reg(inst,dreg,reg,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_mov_reg_mem(inst,reg,mem,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 4: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + +/* + * Note: x86_clear_reg () changes the condition code! 
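+ * (it expands to "xor reg, reg", 2 bytes instead of the 5 of a mov reg, 0; + * the xor sets ZF and PF and clears CF, OF and SF). 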
+ */ +#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg)) + +#define x86_mov_reg_imm(inst,reg,imm) \ + do { \ + *(inst)++ = (unsigned char)0xb8 + (reg); \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_mov_mem_imm(inst,mem,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 2) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 2) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \ + do { \ + if ((size) == 1) { \ + *(inst)++ = (unsigned char)0xc6; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((size) == 2) { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0xc7; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit16 ((inst), (imm)); \ + } else { \ + *(inst)++ = (unsigned char)0xc7; \ + x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_lea_mem(inst,reg,mem) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_lea_membase(inst,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \ + do { \ + *(inst)++ = (unsigned char)0x8d; \ + x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + +#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + g_assert (is_half || X86_IS_BYTE_REG (reg)); \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_mem_emit ((inst), (dreg), (mem)); \ + } while (0) + +#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ + } while (0) + +#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \ + do { \ + unsigned char op = 0xb6; \ + *(inst)++ = (unsigned char)0x0f; \ + if 
((is_signed)) op += 0x08; \ + if ((is_half)) op += 0x01; \ + *(inst)++ = op; \ + x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \ + } while (0) + +#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0) +#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0) + +#define x86_fp_op_mem(inst,opc,mem,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ + x86_mem_emit ((inst), (opc), (mem)); \ + } while (0) + +#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ + x86_membase_emit ((inst), (opc), (basereg), (disp)); \ + } while (0) + +#define x86_fp_op(inst,opc,index) \ + do { \ + *(inst)++ = (unsigned char)0xd8; \ + *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \ + } while (0) + +#define x86_fp_op_reg(inst,opc,index,pop_stack) \ + do { \ + static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \ + *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \ + *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \ + } while (0) + +/** + * @x86_fp_int_op_membase + * Supports FPU operations between ST(0) and integer operand in memory. + * Operation encoded using X86_FP_Opcode enum. + * Operand is addressed by [basereg + disp]. + * is_int specifies whether operand is int32 (TRUE) or int16 (FALSE). + */ +#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \ + do { \ + *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \ + x86_membase_emit ((inst), opc, (basereg), (disp)); \ + } while (0) + +#define x86_fstp(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdd; \ + *(inst)++ = (unsigned char)0xd8+(index); \ + } while (0) + +#define x86_fcompp(inst) \ + do { \ + *(inst)++ = (unsigned char)0xde; \ + *(inst)++ = (unsigned char)0xd9; \ + } while (0) + +#define x86_fucompp(inst) \ + do { \ + *(inst)++ = (unsigned char)0xda; \ + *(inst)++ = (unsigned char)0xe9; \ + } while (0) + +#define x86_fnstsw(inst) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + +#define x86_fnstcw(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_mem_emit ((inst), 7, (mem)); \ + } while (0) + +#define x86_fnstcw_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } while (0) + +#define x86_fldcw(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_mem_emit ((inst), 5, (mem)); \ + } while (0) + +#define x86_fldcw_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } while (0) + +#define x86_fchs(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + +#define x86_frem(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xf8; \ + } while (0) + +#define x86_fxch(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \ + } while (0) + +#define x86_fcomi(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ + } while (0) + +#define x86_fcomip(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ + } while (0) + +#define x86_fucomi(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdb; 
\ + *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ + } while (0) + +#define x86_fucomip(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ + } while (0) + +#define x86_fld(inst,mem,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_fld_membase(inst,basereg,disp,is_double) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_fld80_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 5, (mem)); \ + } while (0) + +#define x86_fld80_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } while (0) + +#define x86_fild(inst,mem,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_mem_emit ((inst), 5, (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 0, (mem)); \ + } \ + } while (0) + +#define x86_fild_membase(inst,basereg,disp,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 5, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } \ + } while (0) + +#define x86_fld_reg(inst,index) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \ + } while (0) + +#define x86_fldz(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xee; \ + } while (0) + +#define x86_fld1(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xe8; \ + } while (0) + +#define x86_fldpi(inst) \ + do { \ + *(inst)++ = (unsigned char)0xd9; \ + *(inst)++ = (unsigned char)0xeb; \ + } while (0) + +#define x86_fst(inst,mem,is_double,pop_stack) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ + x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \ + } while (0) + +#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \ + do { \ + *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ + x86_membase_emit ((inst), 2 + ((pop_stack) ? 
1 : 0), (basereg), (disp)); \ + } while (0) + +#define x86_fst80_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 7, (mem)); \ + } while (0) + + +#define x86_fst80_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } while (0) + + +#define x86_fist_pop(inst,mem,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_mem_emit ((inst), 7, (mem)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_mem_emit ((inst), 3, (mem)); \ + } \ + } while (0) + +#define x86_fist_pop_membase(inst,basereg,disp,is_long) \ + do { \ + if ((is_long)) { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 7, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 3, (basereg), (disp)); \ + } \ + } while (0) + +#define x86_fstsw(inst) \ + do { \ + *(inst)++ = (unsigned char)0x9b; \ + *(inst)++ = (unsigned char)0xdf; \ + *(inst)++ = (unsigned char)0xe0; \ + } while (0) + +/** + * @x86_fist_membase + * Converts content of ST(0) to integer and stores it at memory location + * addressed by [basereg + disp]. + * is_int specifies whether destination is int32 (TRUE) or int16 (FALSE). + */ +#define x86_fist_membase(inst,basereg,disp,is_int) \ + do { \ + if ((is_int)) { \ + *(inst)++ = (unsigned char)0xdb; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } else { \ + *(inst)++ = (unsigned char)0xdf; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } \ + } while (0) + + +#define x86_push_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0x50 + (reg); \ + } while (0) + +#define x86_push_regp(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_regp_emit ((inst), 6, (reg)); \ + } while (0) + +#define x86_push_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 6, (mem)); \ + } while (0) + +#define x86_push_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 6, (basereg), (disp)); \ + } while (0) + +#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \ + } while (0) + +#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0) + +#define x86_push_imm(inst,imm) \ + do { \ + int _imm = (int) (imm); \ + if (x86_is_imm8 (_imm)) { \ + *(inst)++ = (unsigned char)0x6A; \ + x86_imm_emit8 ((inst), (_imm)); \ + } else { \ + *(inst)++ = (unsigned char)0x68; \ + x86_imm_emit32 ((inst), (_imm)); \ + } \ + } while (0) + +#define x86_pop_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0x58 + (reg); \ + } while (0) + +#define x86_pop_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0x8f; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_pop_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x8f; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0) +#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0) +#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0) +#define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0) + +#define x86_loop(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe2; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_loope(inst,imm) \ + do { \ + *(inst)++ = 
(unsigned char)0xe1; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_loopne(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe0; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_jump32(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xe9; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_jump8(inst,imm) \ + do { \ + *(inst)++ = (unsigned char)0xeb; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_jump_reg(inst,reg) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst), 4, (reg)); \ + } while (0) + +#define x86_jump_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 4, (mem)); \ + } while (0) + +#define x86_jump_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 4, (basereg), (disp)); \ + } while (0) + +/* + * target is a pointer in our buffer. + */ +#define x86_jump_code(inst,target) \ + do { \ + int t = (unsigned char*)(target) - (inst) - 2; \ + if (x86_is_imm8(t)) { \ + x86_jump8 ((inst), t); \ + } else { \ + t -= 3; \ + x86_jump32 ((inst), t); \ + } \ + } while (0) + +#define x86_jump_disp(inst,disp) \ + do { \ + int t = (disp) - 2; \ + if (x86_is_imm8(t)) { \ + x86_jump8 ((inst), t); \ + } else { \ + t -= 3; \ + x86_jump32 ((inst), t); \ + } \ + } while (0) + +#define x86_branch8(inst,cond,imm,is_signed) \ + do { \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)]; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)]; \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_branch32(inst,cond,imm,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ + x86_imm_emit32 ((inst), (imm)); \ + } while (0) + +#define x86_branch(inst,cond,target,is_signed) \ + do { \ + int offset = (target) - (inst) - 2; \ + if (x86_is_imm8 ((offset))) \ + x86_branch8 ((inst), (cond), offset, (is_signed)); \ + else { \ + offset -= 4; \ + x86_branch32 ((inst), (cond), offset, (is_signed)); \ + } \ + } while (0) + +#define x86_branch_disp(inst,cond,disp,is_signed) \ + do { \ + int offset = (disp) - 2; \ + if (x86_is_imm8 ((offset))) \ + x86_branch8 ((inst), (cond), offset, (is_signed)); \ + else { \ + offset -= 4; \ + x86_branch32 ((inst), (cond), offset, (is_signed)); \ + } \ + } while (0) + +#define x86_set_reg(inst,cond,reg,is_signed) \ + do { \ + g_assert (X86_IS_BYTE_REG (reg)); \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_reg_emit ((inst), 0, (reg)); \ + } while (0) + +#define x86_set_mem(inst,cond,mem,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_mem_emit ((inst), 0, (mem)); \ + } while (0) + +#define x86_set_membase(inst,cond,basereg,disp,is_signed) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ + x86_membase_emit ((inst), 0, (basereg), (disp)); \ + } while (0) + +#define x86_call_imm(inst,disp) \ + do { \ + *(inst)++ = (unsigned char)0xe8; \ + x86_imm_emit32 ((inst), (int)(disp)); \ + } while (0) + +#define x86_call_reg(inst,reg) \ + do { \ + *(inst)++ = 
(unsigned char)0xff; \ + x86_reg_emit ((inst), 2, (reg)); \ + } while (0) + +#define x86_call_mem(inst,mem) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_mem_emit ((inst), 2, (mem)); \ + } while (0) + +#define x86_call_membase(inst,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xff; \ + x86_membase_emit ((inst), 2, (basereg), (disp)); \ + } while (0) + +#define x86_call_code(inst,target) \ + do { \ + int _x86_offset = (unsigned char*)(target) - (inst); \ + _x86_offset -= 5; \ + x86_call_imm ((inst), _x86_offset); \ + } while (0) + +#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) + +#define x86_ret_imm(inst,imm) \ + do { \ + if ((imm) == 0) { \ + x86_ret ((inst)); \ + } else { \ + *(inst)++ = (unsigned char)0xc2; \ + x86_imm_emit16 ((inst), (imm)); \ + } \ + } while (0) + +#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_cmov_mem(inst,cond,is_signed,reg,mem) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_mem_emit ((inst), (reg), (mem)); \ + } while (0) + +#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char) 0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_enter(inst,framesize) \ + do { \ + *(inst)++ = (unsigned char)0xc8; \ + x86_imm_emit16 ((inst), (framesize)); \ + *(inst)++ = 0; \ + } while (0) + +#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) +#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0) + +#define x86_fsin(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) +#define x86_fcos(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) +#define x86_fabs(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) +#define x86_ftst(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0) +#define x86_fxam(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0) +#define x86_fpatan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) +#define x86_fprem(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) +#define x86_fprem1(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0) +#define x86_frndint(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0) +#define x86_fsqrt(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0) +#define x86_fptan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0) + +#define x86_padding(inst,size) \ + do { \ + switch ((size)) { \ + case 1: x86_nop ((inst)); break; \ + case 2: *(inst)++ = 0x8b; \ + *(inst)++ = 0xc0; break; \ + case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \ + *(inst)++ = 0x00; break; \ + case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + 
break; \ + case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + x86_nop ((inst)); break; \ + case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + break; \ + case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \ + *(inst)++ = 0x24; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; *(inst)++ = 0x00; \ + *(inst)++ = 0x00; break; \ + default: assert (0); \ + } \ + } while (0) + +#define x86_prolog(inst,frame_size,reg_mask) \ + do { \ + unsigned i, m = 1; \ + x86_enter ((inst), (frame_size)); \ + for (i = 0; i < X86_NREG; ++i, m <<= 1) { \ + if ((reg_mask) & m) \ + x86_push_reg ((inst), i); \ + } \ + } while (0) + +#define x86_epilog(inst,reg_mask) \ + do { \ + unsigned i, m = 1 << X86_EDI; \ + for (i = X86_EDI; m != 0; i--, m=m>>1) { \ + if ((reg_mask) & m) \ + x86_pop_reg ((inst), i); \ + } \ + x86_leave ((inst)); \ + x86_ret ((inst)); \ + } while (0) + + +typedef enum { + X86_SSE_SQRT = 0x51, + X86_SSE_RSQRT = 0x52, + X86_SSE_ADD = 0x58, + X86_SSE_DIV = 0x5E, + X86_SSE_MUL = 0x59, + X86_SSE_SUB = 0x5C, + X86_SSE_MIN = 0x5D, + X86_SSE_MAX = 0x5F, + + X86_SSE_ADDSUB = 0xD0, + X86_SSE_HADD = 0x7C, + X86_SSE_HSUB = 0x7D, + + X86_SSE_PAND = 0xDB, + X86_SSE_POR = 0xEB, + X86_SSE_PXOR = 0xEF, + + X86_SSE_PADDB = 0xFC, + X86_SSE_PADDW = 0xFD, + X86_SSE_PADDD = 0xFE, + + X86_SSE_PSUBB = 0xF8, + X86_SSE_PSUBW = 0xF9, + X86_SSE_PSUBD = 0xFA, + + X86_SSE_PUNPCKLBW = 0x60, + X86_SSE_PUNPCKLWD = 0x61, + X86_SSE_PUNPCKLDQ = 0x62, + X86_SSE_PUNPCKLQDQ = 0x6C, + + X86_SSE_PUNPCKHBW = 0x68, + X86_SSE_PUNPCKHWD = 0x69, + X86_SSE_PUNPCKHDQ = 0x6A, + X86_SSE_PUNPCKHQDQ = 0x6D, + + X86_SSE_PACKUSWB = 0x67, + X86_SSE_PACKUSDW = 0x2B,/*sse41*/ + + X86_SSE_PADDUSB = 0xDC, + X86_SSE_PADDUSW = 0xDD, + X86_SSE_PSUBUSB = 0xD8, + X86_SSE_PSUBUSW = 0xD9, + + X86_SSE_PMULLW = 0xD5, + X86_SSE_PMULLD = 0x40,/*sse41*/ + + X86_SSE_PSHIFTW = 0x71, + X86_SSE_PSHIFTD = 0x72, + X86_SSE_SHR = 2, + X86_SSE_SAR = 4, + X86_SSE_SHL = 6, + + X86_SSE_PSRLW_REG = 0xD1, + X86_SSE_PSRAW_REG = 0xE1, + X86_SSE_PSLLW_REG = 0xF1, + + X86_SSE_PSRLD_REG = 0xD2, + X86_SSE_PSRAD_REG = 0xE2, + X86_SSE_PSLLD_REG = 0xF2, + +} X86_SSE_Opcode; + + +/* minimal SSE* support */ +#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0xf2; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ + } while (0) + +#define x86_cvttsd2si(inst,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xf2; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x2c; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)(opc); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ + do { \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF2; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + +#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF3; \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + } while (0) + 
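These SSE helpers all funnel through x86_sse_alu_reg_reg, which emits the two-byte 0x0F opcode selected from X86_SSE_Opcode; the pd/sd/ss variants differ only in the mandatory prefix byte (0x66, 0xF2, 0xF3) emitted first. A minimal usage sketch, assuming this header is on the include path and the X86_XMM* constants from its XMM register enum; like the test driver, it only dumps the encoding for inspection rather than executing it:

#include "x86-codegen.h"
#include <stdio.h>

/* Emits addsd %xmm1,%xmm0 (f2 0f 58 c1) and pand %xmm2,%xmm3 (66 0f db da)
 * into a buffer, then prints the bytes so the encoding can be checked. */
int main (void)
{
	unsigned char code [32];
	unsigned char *p = code;
	int i;

	x86_sse_alu_sd_reg_reg (p, X86_SSE_ADD, X86_XMM0, X86_XMM1);
	x86_sse_alu_pd_reg_reg (p, X86_SSE_PAND, X86_XMM3, X86_XMM2);

	for (i = 0; i < p - code; ++i)
		printf ("%02x ", code [i]);
	printf ("\n");
	return 0;
}

The membase and memindex forms compose in the same way, with x86_membase_emit or x86_memindex_emit in place of x86_reg_emit.
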
+#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)0x38; \ + *(inst)++ = (unsigned char)(opc); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + + +#define x86_movups_reg_membase(inst,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + +#define x86_movups_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x28; \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_membase_reg(inst,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x29; \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define x86_movaps_reg_reg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x28; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + } while (0) + + +#define x86_movd_reg_xreg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x7e; \ + x86_reg_emit ((inst), (sreg), (dreg)); \ + } while (0) + +#define x86_movd_xreg_reg(inst,dreg,sreg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x6e; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + } while (0) + +#define x86_pshufd_reg_reg(inst,dreg,sreg,mask) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x70; \ + x86_reg_emit ((inst), (dreg), (sreg)); \ + *(inst)++ = (unsigned char)mask; \ + } while (0) + +#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \ + do { \ + x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + +#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \ + do { \ + x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \ + } while (0) + +#endif // X86_H + -- cgit v1.1 From 2b6070d8bbd583f6bb90e02f3961252ef0854da8 Mon Sep 17 00:00:00 2001 From: Gonzalo Paniagua Javier Date: Fri, 24 Oct 2008 01:02:49 +0000 Subject: remove temporary/generated files svn path=/trunk/mono/; revision=116902 --- x86/.deps/tramp.Plo | 1 - x86/Makefile | 507 -------------- x86/Makefile.in | 506 -------------- x86/d | 103 --- x86/x86-codegen.h~ | 1905 --------------------------------------------------- 5 files changed, 3022 deletions(-) delete mode 100644 x86/.deps/tramp.Plo delete mode 100644 x86/Makefile delete mode 100644 x86/Makefile.in delete mode 100644 x86/d delete mode 100644 x86/x86-codegen.h~ diff --git a/x86/.deps/tramp.Plo b/x86/.deps/tramp.Plo deleted file mode 100644 index 9ce06a8..0000000 --- a/x86/.deps/tramp.Plo +++ /dev/null @@ -1 +0,0 @@ -# dummy diff --git a/x86/Makefile b/x86/Makefile deleted file mode 100644 index b892bd9..0000000 --- a/x86/Makefile +++ /dev/null @@ -1,507 +0,0 @@ -# Postprocessed with patch-quiet.sh -# Makefile.in generated by automake 1.10 from Makefile.am. -# mono/arch/x86/Makefile. Generated from Makefile.in by configure. 
- -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - - - -pkgdatadir = $(datadir)/mono -pkglibdir = $(libdir)/mono -pkgincludedir = $(includedir)/mono -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = i686-suse-linux-gnu -host_triplet = i686-suse-linux-gnu -target_triplet = i686-suse-linux-gnu -subdir = mono/arch/x86 -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/nls.m4 $(top_srcdir)/po.m4 \ - $(top_srcdir)/progtest.m4 $(top_srcdir)/acinclude.m4 \ - $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -LTLIBRARIES = $(noinst_LTLIBRARIES) -libmonoarch_x86_la_LIBADD = -am__libmonoarch_x86_la_SOURCES_DIST = tramp.c x86-codegen.h -#am_libmonoarch_x86_la_OBJECTS = tramp.lo -libmonoarch_x86_la_OBJECTS = $(am_libmonoarch_x86_la_OBJECTS) -#am_libmonoarch_x86_la_rpath = -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles quiet -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(LIBTOOL) $(if $(V),,--quiet) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ - --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ - $(LDFLAGS) -o $@ -SOURCES = $(libmonoarch_x86_la_SOURCES) -DIST_SOURCES = $(am__libmonoarch_x86_la_SOURCES_DIST) -ETAGS = etags -CTAGS = ctags -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = ${SHELL} /home/rodrigo/repo/mono/missing --run aclocal-1.10 -AMTAR = ${SHELL} /home/rodrigo/repo/mono/missing --run tar -API_VER = 1.0 -AR = ar -AS = as -AUTOCONF = ${SHELL} /home/rodrigo/repo/mono/missing --run autoconf -AUTOHEADER = ${SHELL} /home/rodrigo/repo/mono/missing --run autoheader -AUTOMAKE = ${SHELL} /home/rodrigo/repo/mono/missing --run automake-1.10 -AWK = gawk -BASE_DEPENDENCIES_CFLAGS = -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include -BASE_DEPENDENCIES_LIBS = -lglib-2.0 -BISON = yes -BUILD_EXEEXT = -BUILD_GLIB_CFLAGS = -pthread -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include -BUILD_GLIB_LIBS = -pthread -lgthread-2.0 -lrt -lglib-2.0 -CC = gcc -CCAS = gcc -CCASDEPMODE = depmode=gcc3 -CCASFLAGS = -g -O2 -CCDEPMODE = depmode=gcc3 -CC_FOR_BUILD = gcc -CFLAGS = -g -O2 -fno-strict-aliasing -Wdeclaration-after-statement -g -Wall -Wunused -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wmissing-prototypes -Wnested-externs -Wpointer-arith -Wno-cast-qual -Wcast-align -Wwrite-strings -mno-tls-direct-seg-refs -CFLAGS_FOR_BUILD = -g -O2 -CPP = gcc -E -CPPFLAGS = -DGC_LINUX_THREADS -D_GNU_SOURCE -D_REENTRANT -DUSE_MMAP -DUSE_MUNMAP -D_FILE_OFFSET_BITS=64 -DUSE_COMPILER_TLS -CXX = g++ -CXXCPP = g++ -E -CXXDEPMODE = depmode=gcc3 -CXXFLAGS = -g -O2 -CYGPATH_W = echo -DEFS = -DHAVE_CONFIG_H -DEPDIR = .deps -DISABLE_SHARED_HANDLES = -DLLTOOL = dlltool -DOLT_BASH = /bin/bash -DTRACE = -DTRACEFLAGS = -ECHO = echo -ECHO_C = -ECHO_N = -n -ECHO_T = -EGREP = /usr/bin/grep -E -EXEEXT = -F77 = -FFLAGS = -GETTEXT_MACRO_VERSION = 0.17 -GLIB_CFLAGS = -pthread -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include -GLIB_LIBS = -pthread -lgthread-2.0 -lrt -lglib-2.0 -GMODULE_CFLAGS = -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include -GMODULE_LIBS = -Wl,--export-dynamic -lgmodule-2.0 -ldl -lglib-2.0 -GMSGFMT = /usr/bin/msgfmt -GMSGFMT_015 = /usr/bin/msgfmt -GREP = /usr/bin/grep -HAVE_BOEHM_GC = -HOST_CC = -INSTALL = /usr/bin/install -c -INSTALL_DATA = ${INSTALL} -m 644 -INSTALL_PROGRAM = ${INSTALL} -INSTALL_SCRIPT = ${INSTALL} -INSTALL_STRIP_PROGRAM = $(install_sh) -c -s -INTL = libc.so.6 -LDFLAGS = -LIBC = libc.so.6 -LIBGC_CFLAGS = -I$(top_srcdir)/libgc/include -LIBGC_LIBS = $(top_builddir)/libgc/libmonogc.la -LIBGC_STATIC_LIBS = $(top_builddir)/libgc/libmonogc-static.la -LIBOBJS = -LIBS = -lrt -ldl -lpthread -lm -LIBTOOL = $(top_builddir)/doltlibtool -LN_S = ln -s -LTCOMPILE = $(top_builddir)/doltcompile $(COMPILE) -LTCXXCOMPILE = $(top_builddir)/doltcompile $(CXXCOMPILE) -LTLIBOBJS = -MAINT = -MAKEINFO = ${SHELL} /home/rodrigo/repo/mono/missing --run makeinfo -MKDIR_P = /bin/mkdir -p -MONO_DL_NEED_USCORE = 0 -MSGFMT = /usr/bin/msgfmt -MSGFMT_015 = /usr/bin/msgfmt -MSGMERGE = /usr/bin/msgmerge -OBJDUMP = objdump -OBJEXT = o -OPROFILE_CFLAGS = -OPROFILE_LIBS = -PACKAGE = mono -PACKAGE_BUGREPORT = -PACKAGE_NAME = -PACKAGE_STRING = -PACKAGE_TARNAME = -PACKAGE_VERSION = -PATH_SEPARATOR = : -PKG_CONFIG = 
/usr/bin/pkg-config -RANLIB = ranlib -SED = /usr/bin/sed -SET_MAKE = -SHELL = /bin/sh -SQLITE = libsqlite.so.0 -SQLITE3 = libsqlite3.so.0 -STRIP = strip -USE_NLS = yes -VERSION = 2.1 -X11 = libX11.so.6 -XATTR_LIB = -XGETTEXT = /usr/bin/xgettext -XGETTEXT_015 = /usr/bin/xgettext -XGETTEXT_EXTRA_OPTIONS = -XMKMF = -abs_builddir = /home/rodrigo/repo/mono/mono/arch/x86 -abs_srcdir = /home/rodrigo/repo/mono/mono/arch/x86 -abs_top_builddir = /home/rodrigo/repo/mono -abs_top_srcdir = /home/rodrigo/repo/mono -ac_ct_CC = gcc -ac_ct_CXX = g++ -ac_ct_F77 = -am__include = include -am__leading_dot = . -am__quote = -am__tar = tar --format=ustar -chf - "$$tardir" -am__untar = tar -xf - -arch_target = x86 -bindir = ${exec_prefix}/bin -build = i686-suse-linux-gnu -build_alias = -build_cpu = i686 -build_os = linux-gnu -build_vendor = suse -builddir = . -datadir = ${datarootdir} -datarootdir = ${prefix}/share -docdir = ${datarootdir}/doc/${PACKAGE} -dvidir = ${docdir} -eglib_dir = -exec_prefix = ${prefix} -export_ldflags = -Wl,--export-dynamic -host = i686-suse-linux-gnu -host_alias = -host_cpu = i686 -host_os = linux-gnu -host_vendor = suse -htmldir = ${docdir} -ikvm_native_dir = ikvm-native -includedir = ${prefix}/include -infodir = ${datarootdir}/info -install_sh = $(SHELL) /home/rodrigo/repo/mono/install-sh -libdir = ${exec_prefix}/lib -libexecdir = ${exec_prefix}/libexec -libgc_dir = libgc -libgdiplus_loc = -libmono_cflags = -D_REENTRANT -libmono_ldflags = -lpthread -lrt -ldl -lpthread -lm -libsuffix = .so -localedir = ${datarootdir}/locale -localstatedir = ${prefix}/var -mandir = ${datarootdir}/man -mcs_topdir = $(top_srcdir)/../mcs -mcs_topdir_from_srcdir = $(top_builddir)/../mcs -mkdir_p = /bin/mkdir -p -mono_build_root = /home/rodrigo/repo/mono -mono_cfg_dir = /home/rodrigo/repo/mono/runtime/etc -mono_runtime = mono/mini/mono -oldincludedir = /usr/include -pdfdir = ${docdir} -prefix = /home/rodrigo/repo/install/ -program_transform_name = s,x,x, -psdir = ${docdir} -reloc_libdir = lib -sbindir = ${exec_prefix}/sbin -sharedstatedir = ${prefix}/com -srcdir = . -subdirs = libgc -sysconfdir = ${prefix}/etc -target = i686-suse-linux-gnu -target_alias = -target_cpu = i686 -target_os = linux-gnu -target_vendor = suse -top_builddir = ../../.. -top_srcdir = ../../.. -#INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -#noinst_LTLIBRARIES = libmonoarch-x86.la -#libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h -all: all-am - -.SUFFIXES: -.SUFFIXES: .c .lo .o .obj -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign mono/arch/x86/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign mono/arch/x86/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -clean-noinstLTLIBRARIES: - -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) - @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ - dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ - test "$$dir" != "$$p" || dir=.; \ - echo "rm -f \"$${dir}/so_locations\""; \ - rm -f "$${dir}/so_locations"; \ - done -libmonoarch-x86.la: $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_DEPENDENCIES) - $(if $(V),,@echo -e "LD\t$@";) $(LINK) $(am_libmonoarch_x86_la_rpath) $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_LIBADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -include ./$(DEPDIR)/tramp.Plo - -.c.o: - $(if $(V),,@echo -e "CC\t$@";) $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c $< - -.c.obj: - $(if $(V),,@echo -e "CC\t$@";) $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` - $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -# source='$<' object='$@' libtool=no \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(COMPILE) -c `$(CYGPATH_W) '$<'` - -.c.lo: - $(if $(V),,@echo -e "CC\t$@";) $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< - $(if $(V),,@)mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo -# source='$<' object='$@' libtool=yes \ -# DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) \ -# $(LTCOMPILE) -c -o $@ $< - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - 
-GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(LTLIBRARIES) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ - mostlyclean-am - -distclean: distclean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-exec-am: - -install-html: install-html-am - -install-info: install-info-am - -install-man: - -install-pdf: install-pdf-am - -install-ps: install-ps-am - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-compile mostlyclean-generic \ - mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ - clean-libtool clean-noinstLTLIBRARIES ctags distclean \ - distclean-compile distclean-generic distclean-libtool \ - distclean-tags distdir dvi dvi-am html html-am info info-am \ - install install-am install-data install-data-am install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - pdf pdf-am ps ps-am tags uninstall uninstall-am - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/x86/Makefile.in b/x86/Makefile.in deleted file mode 100644 index ef5a245..0000000 --- a/x86/Makefile.in +++ /dev/null @@ -1,506 +0,0 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- -@SET_MAKE@ - -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -target_triplet = @target@ -subdir = mono/arch/x86 -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/nls.m4 $(top_srcdir)/po.m4 \ - $(top_srcdir)/progtest.m4 $(top_srcdir)/acinclude.m4 \ - $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/config.h -CONFIG_CLEAN_FILES = -LTLIBRARIES = $(noinst_LTLIBRARIES) -libmonoarch_x86_la_LIBADD = -am__libmonoarch_x86_la_SOURCES_DIST = tramp.c x86-codegen.h -@INTERP_SUPPORTED_TRUE@am_libmonoarch_x86_la_OBJECTS = tramp.lo -libmonoarch_x86_la_OBJECTS = $(am_libmonoarch_x86_la_OBJECTS) -@INTERP_SUPPORTED_TRUE@am_libmonoarch_x86_la_rpath = -DEFAULT_INCLUDES = -I. -I$(top_builddir)@am__isrc@ -depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ - --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ - $(LDFLAGS) -o $@ -SOURCES = $(libmonoarch_x86_la_SOURCES) -DIST_SOURCES = $(am__libmonoarch_x86_la_SOURCES_DIST) -ETAGS = etags -CTAGS = ctags -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMTAR = @AMTAR@ -API_VER = @API_VER@ -AR = @AR@ -AS = @AS@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -BASE_DEPENDENCIES_CFLAGS = @BASE_DEPENDENCIES_CFLAGS@ -BASE_DEPENDENCIES_LIBS = @BASE_DEPENDENCIES_LIBS@ -BISON = @BISON@ -BUILD_EXEEXT = @BUILD_EXEEXT@ -BUILD_GLIB_CFLAGS = @BUILD_GLIB_CFLAGS@ -BUILD_GLIB_LIBS = @BUILD_GLIB_LIBS@ -CC = @CC@ -CCAS = @CCAS@ -CCASDEPMODE = @CCASDEPMODE@ -CCASFLAGS = @CCASFLAGS@ -CCDEPMODE = @CCDEPMODE@ -CC_FOR_BUILD = @CC_FOR_BUILD@ -CFLAGS = @CFLAGS@ -CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DISABLE_SHARED_HANDLES = @DISABLE_SHARED_HANDLES@ -DLLTOOL = @DLLTOOL@ -DOLT_BASH = @DOLT_BASH@ -DTRACE = @DTRACE@ -DTRACEFLAGS = @DTRACEFLAGS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ -FFLAGS = @FFLAGS@ -GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ -GLIB_CFLAGS = @GLIB_CFLAGS@ -GLIB_LIBS = @GLIB_LIBS@ -GMODULE_CFLAGS = @GMODULE_CFLAGS@ -GMODULE_LIBS = @GMODULE_LIBS@ -GMSGFMT = @GMSGFMT@ -GMSGFMT_015 = @GMSGFMT_015@ -GREP = @GREP@ -HAVE_BOEHM_GC = @HAVE_BOEHM_GC@ -HOST_CC = @HOST_CC@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INTL = @INTL@ -LDFLAGS = @LDFLAGS@ -LIBC = @LIBC@ -LIBGC_CFLAGS = 
@LIBGC_CFLAGS@ -LIBGC_LIBS = @LIBGC_LIBS@ -LIBGC_STATIC_LIBS = @LIBGC_STATIC_LIBS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTCOMPILE = @LTCOMPILE@ -LTCXXCOMPILE = @LTCXXCOMPILE@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MKDIR_P = @MKDIR_P@ -MONO_DL_NEED_USCORE = @MONO_DL_NEED_USCORE@ -MSGFMT = @MSGFMT@ -MSGFMT_015 = @MSGFMT_015@ -MSGMERGE = @MSGMERGE@ -OBJDUMP = @OBJDUMP@ -OBJEXT = @OBJEXT@ -OPROFILE_CFLAGS = @OPROFILE_CFLAGS@ -OPROFILE_LIBS = @OPROFILE_LIBS@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PKG_CONFIG = @PKG_CONFIG@ -RANLIB = @RANLIB@ -SED = @SED@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -SQLITE = @SQLITE@ -SQLITE3 = @SQLITE3@ -STRIP = @STRIP@ -USE_NLS = @USE_NLS@ -VERSION = @VERSION@ -X11 = @X11@ -XATTR_LIB = @XATTR_LIB@ -XGETTEXT = @XGETTEXT@ -XGETTEXT_015 = @XGETTEXT_015@ -XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ -XMKMF = @XMKMF@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -arch_target = @arch_target@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -eglib_dir = @eglib_dir@ -exec_prefix = @exec_prefix@ -export_ldflags = @export_ldflags@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -ikvm_native_dir = @ikvm_native_dir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -libgc_dir = @libgc_dir@ -libgdiplus_loc = @libgdiplus_loc@ -libmono_cflags = @libmono_cflags@ -libmono_ldflags = @libmono_ldflags@ -libsuffix = @libsuffix@ -localedir = @localedir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mcs_topdir = @mcs_topdir@ -mcs_topdir_from_srcdir = @mcs_topdir_from_srcdir@ -mkdir_p = @mkdir_p@ -mono_build_root = @mono_build_root@ -mono_cfg_dir = @mono_cfg_dir@ -mono_runtime = @mono_runtime@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -reloc_libdir = @reloc_libdir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -subdirs = @subdirs@ -sysconfdir = @sysconfdir@ -target = @target@ -target_alias = @target_alias@ -target_cpu = @target_cpu@ -target_os = @target_os@ -target_vendor = @target_vendor@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -@INTERP_SUPPORTED_TRUE@INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) -@INTERP_SUPPORTED_TRUE@noinst_LTLIBRARIES = libmonoarch-x86.la -@INTERP_SUPPORTED_TRUE@libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h -all: all-am - -.SUFFIXES: -.SUFFIXES: .c .lo .o .obj -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ 
- esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign mono/arch/x86/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign mono/arch/x86/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -clean-noinstLTLIBRARIES: - -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) - @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ - dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ - test "$$dir" != "$$p" || dir=.; \ - echo "rm -f \"$${dir}/so_locations\""; \ - rm -f "$${dir}/so_locations"; \ - done -libmonoarch-x86.la: $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_DEPENDENCIES) - $(LINK) $(am_libmonoarch_x86_la_rpath) $(libmonoarch_x86_la_OBJECTS) $(libmonoarch_x86_la_LIBADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tramp.Plo@am__quote@ - -.c.o: -@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(COMPILE) -c $< - -.c.obj: -@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` - -.c.lo: -@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z 
"$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ - list='$(DISTFILES)'; \ - dist_files=`for file in $$list; do echo $$file; done | \ - sed -e "s|^$$srcdirstrip/||;t" \ - -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ - case $$dist_files in \ - */*) $(MKDIR_P) `echo "$$dist_files" | \ - sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ - sort -u` ;; \ - esac; \ - for file in $$dist_files; do \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - if test -d $$d/$$file; then \ - dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(LTLIBRARIES) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ - mostlyclean-am - -distclean: distclean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-exec-am: - -install-html: install-html-am - -install-info: install-info-am - -install-man: - -install-pdf: install-pdf-am - -install-ps: install-ps-am - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-compile mostlyclean-generic \ - mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: install-am install-strip - -.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ - clean-libtool clean-noinstLTLIBRARIES ctags distclean \ - distclean-compile distclean-generic distclean-libtool \ - distclean-tags distdir dvi dvi-am html html-am info info-am \ - install install-am install-data install-data-am install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - pdf pdf-am ps ps-am tags uninstall uninstall-am - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/x86/d b/x86/d deleted file mode 100644 index 2d34df2..0000000 --- a/x86/d +++ /dev/null @@ -1,103 +0,0 @@ -diff --cc mono/arch/x86/x86-codegen.h -index 59a230f,5333488..0000000 ---- a/mono/arch/x86/x86-codegen.h -+++ b/mono/arch/x86/x86-codegen.h -@@@ -1745,45 -1745,38 +1745,7 @@@ typedef enum - *(inst)++ = (unsigned char)0x2c; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) --- -- #define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ -- do { \ -- *(inst)++ = (unsigned char)0x0F; \ -- *(inst)++ = (unsigned char)(opc); \ -- x86_reg_emit ((inst), (dreg), (reg)); \ -- } while (0) -- -- #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ -- do { \ -- *(inst)++ = (unsigned char)0x66; \ -- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ -- } while (0) -- ---#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ --- do { \ -- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ -- } while (0) -- -- #define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ -- do { \ -- *(inst)++ = (unsigned char)0xF2; \ -- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ -- } while (0) -- -- #define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ -- do { \ -- *(inst)++ = (unsigned char)0xF3; \ -- x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ -- } while (0) -- -- #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ -- do { \ -- *(inst)++ = (unsigned char)0x0f; \ -- *(inst)++ = (unsigned char)0x10; \ -- x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ -- } while (0) -- -- #define x86_movups_membase_reg(inst,basereg,disp,reg) \ - - *(inst)++ = (unsigned char)0x0F; \ - - *(inst)++ = (unsigned char)(opc); \ - - x86_reg_emit ((inst), (dreg), (reg)); \ - - } while (0) - - - -#define x86_sse3_alu_ps_reg_reg(inst,opc,dreg,reg) \ - - do { \ - 
- *(inst)++ = (unsigned char)0xF2; \ - - *(inst)++ = (unsigned char)0x0F; \ - - *(inst)++ = (unsigned char)(opc); \ - - x86_reg_emit ((inst), (dreg), (reg)); \ - - } while (0) - - - -#define x86_sse_alu_i_reg_reg(inst,opc,dreg,reg) \ - - do { \ - - *(inst)++ = (unsigned char)0x66; \ - - *(inst)++ = (unsigned char)0x0F; \ - - *(inst)++ = (unsigned char)(opc); \ - - x86_reg_emit ((inst), (dreg), (reg)); \ - - } while (0) - - - -#define x86_movups_reg_membase(inst,sreg,basereg,disp) \ - - do { \ - - *(inst)++ = (unsigned char)0x0f; \ - - *(inst)++ = (unsigned char)0x10; \ - - x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ - - } while (0) - - - -#define x86_movups_membase_reg(inst,basereg,disp,reg) \ -++e_reg(inst,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ -@@@ -1804,14 -1797,15 +1766,6 @@@ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -- #define x86_movaps_reg_reg(inst,dreg,sreg) \ -- do { \ -- *(inst)++ = (unsigned char)0x0f; \ -- *(inst)++ = (unsigned char)0x28; \ -- x86_reg_emit ((inst), (dreg), (sreg)); \ -- } while (0) --- -- - -#define x86_movups_reg_reg(inst,dreg,sreg) \ - - do { \ - - *(inst)++ = (unsigned char)0x0f; \ - - *(inst)++ = (unsigned char)0x10; \ - - x86_reg_emit ((inst), (dreg), (sreg)); \ - - } while (0) - - - - - #define x86_movd_reg_xreg(inst,dreg,sreg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ diff --git a/x86/x86-codegen.h~ b/x86/x86-codegen.h~ deleted file mode 100644 index a51c0fe..0000000 --- a/x86/x86-codegen.h~ +++ /dev/null @@ -1,1905 +0,0 @@ -/* - * x86-codegen.h: Macros for generating x86 code - * - * Authors: - * Paolo Molaro (lupus@ximian.com) - * Intel Corporation (ORP Project) - * Sergey Chaban (serge@wildwestsoftware.com) - * Dietmar Maurer (dietmar@ximian.com) - * Patrik Torstensson - * - * Copyright (C) 2000 Intel Corporation. All rights reserved. - * Copyright (C) 2001, 2002 Ximian, Inc. 
- */ - -#ifndef X86_H -#define X86_H -#include -/* -// x86 register numbers -*/ -typedef enum { - X86_EAX = 0, - X86_ECX = 1, - X86_EDX = 2, - X86_EBX = 3, - X86_ESP = 4, - X86_EBP = 5, - X86_ESI = 6, - X86_EDI = 7, - X86_NREG -} X86_Reg_No; - -typedef enum { - X86_XMM0, - X86_XMM1, - X86_XMM2, - X86_XMM3, - X86_XMM4, - X86_XMM5, - X86_XMM6, - X86_XMM7, - X86_XMM_NREG -} X86_XMM_Reg_No; - -/* -// opcodes for alu instructions -*/ -typedef enum { - X86_ADD = 0, - X86_OR = 1, - X86_ADC = 2, - X86_SBB = 3, - X86_AND = 4, - X86_SUB = 5, - X86_XOR = 6, - X86_CMP = 7, - X86_NALU -} X86_ALU_Opcode; -/* -// opcodes for shift instructions -*/ -typedef enum { - X86_SHLD, - X86_SHLR, - X86_ROL = 0, - X86_ROR = 1, - X86_RCL = 2, - X86_RCR = 3, - X86_SHL = 4, - X86_SHR = 5, - X86_SAR = 7, - X86_NSHIFT = 8 -} X86_Shift_Opcode; -/* -// opcodes for floating-point instructions -*/ -typedef enum { - X86_FADD = 0, - X86_FMUL = 1, - X86_FCOM = 2, - X86_FCOMP = 3, - X86_FSUB = 4, - X86_FSUBR = 5, - X86_FDIV = 6, - X86_FDIVR = 7, - X86_NFP = 8 -} X86_FP_Opcode; -/* -// integer conditions codes -*/ -typedef enum { - X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0, - X86_CC_NE = 1, X86_CC_NZ = 1, - X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2, - X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3, - X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4, - X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5, - X86_CC_LZ = 6, X86_CC_S = 6, - X86_CC_GEZ = 7, X86_CC_NS = 7, - X86_CC_P = 8, X86_CC_PE = 8, - X86_CC_NP = 9, X86_CC_PO = 9, - X86_CC_O = 10, - X86_CC_NO = 11, - X86_NCC -} X86_CC; - -/* FP status */ -enum { - X86_FP_C0 = 0x100, - X86_FP_C1 = 0x200, - X86_FP_C2 = 0x400, - X86_FP_C3 = 0x4000, - X86_FP_CC_MASK = 0x4500 -}; - -/* FP control word */ -enum { - X86_FPCW_INVOPEX_MASK = 0x1, - X86_FPCW_DENOPEX_MASK = 0x2, - X86_FPCW_ZERODIV_MASK = 0x4, - X86_FPCW_OVFEX_MASK = 0x8, - X86_FPCW_UNDFEX_MASK = 0x10, - X86_FPCW_PRECEX_MASK = 0x20, - X86_FPCW_PRECC_MASK = 0x300, - X86_FPCW_ROUNDC_MASK = 0xc00, - - /* values for precision control */ - X86_FPCW_PREC_SINGLE = 0, - X86_FPCW_PREC_DOUBLE = 0x200, - X86_FPCW_PREC_EXTENDED = 0x300, - - /* values for rounding control */ - X86_FPCW_ROUND_NEAREST = 0, - X86_FPCW_ROUND_DOWN = 0x400, - X86_FPCW_ROUND_UP = 0x800, - X86_FPCW_ROUND_TOZERO = 0xc00 -}; - -/* -// prefix code -*/ -typedef enum { - X86_LOCK_PREFIX = 0xF0, - X86_REPNZ_PREFIX = 0xF2, - X86_REPZ_PREFIX = 0xF3, - X86_REP_PREFIX = 0xF3, - X86_CS_PREFIX = 0x2E, - X86_SS_PREFIX = 0x36, - X86_DS_PREFIX = 0x3E, - X86_ES_PREFIX = 0x26, - X86_FS_PREFIX = 0x64, - X86_GS_PREFIX = 0x65, - X86_UNLIKELY_PREFIX = 0x2E, - X86_LIKELY_PREFIX = 0x3E, - X86_OPERAND_PREFIX = 0x66, - X86_ADDRESS_PREFIX = 0x67 -} X86_Prefix; - -static const unsigned char -x86_cc_unsigned_map [X86_NCC] = { - 0x74, /* eq */ - 0x75, /* ne */ - 0x72, /* lt */ - 0x76, /* le */ - 0x77, /* gt */ - 0x73, /* ge */ - 0x78, /* lz */ - 0x79, /* gez */ - 0x7a, /* p */ - 0x7b, /* np */ - 0x70, /* o */ - 0x71, /* no */ -}; - -static const unsigned char -x86_cc_signed_map [X86_NCC] = { - 0x74, /* eq */ - 0x75, /* ne */ - 0x7c, /* lt */ - 0x7e, /* le */ - 0x7f, /* gt */ - 0x7d, /* ge */ - 0x78, /* lz */ - 0x79, /* gez */ - 0x7a, /* p */ - 0x7b, /* np */ - 0x70, /* o */ - 0x71, /* no */ -}; - -typedef union { - int val; - unsigned char b [4]; -} x86_imm_buf; - -#define X86_NOBASEREG (-1) - -/* -// bitvector mask for callee-saved registers -*/ -#define X86_ESI_MASK (1<> 6) -#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7) -#define x86_modrm_rm(modrm) ((modrm) & 0x7) - 
-#define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0) -#define x86_imm_emit32(inst,imm) \ - do { \ - x86_imm_buf imb; imb.val = (int) (imm); \ - *(inst)++ = imb.b [0]; \ - *(inst)++ = imb.b [1]; \ - *(inst)++ = imb.b [2]; \ - *(inst)++ = imb.b [3]; \ - } while (0) -#define x86_imm_emit16(inst,imm) do { *(short*)(inst) = (imm); (inst) += 2; } while (0) -#define x86_imm_emit8(inst,imm) do { *(inst) = (unsigned char)((imm) & 0xff); ++(inst); } while (0) -#define x86_is_imm8(imm) (((int)(imm) >= -128 && (int)(imm) <= 127)) -#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1))) - -#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0) -#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0) -#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0) -#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) - -#define x86_membase_emit(inst,r,basereg,disp) do {\ - if ((basereg) == X86_ESP) { \ - if ((disp) == 0) { \ - x86_address_byte ((inst), 0, (r), X86_ESP); \ - x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ - } else if (x86_is_imm8((disp))) { \ - x86_address_byte ((inst), 1, (r), X86_ESP); \ - x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ - x86_imm_emit8 ((inst), (disp)); \ - } else { \ - x86_address_byte ((inst), 2, (r), X86_ESP); \ - x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - break; \ - } \ - if ((disp) == 0 && (basereg) != X86_EBP) { \ - x86_address_byte ((inst), 0, (r), (basereg)); \ - break; \ - } \ - if (x86_is_imm8((disp))) { \ - x86_address_byte ((inst), 1, (r), (basereg)); \ - x86_imm_emit8 ((inst), (disp)); \ - } else { \ - x86_address_byte ((inst), 2, (r), (basereg)); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - } while (0) - -#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ - do { \ - if ((basereg) == X86_NOBASEREG) { \ - x86_address_byte ((inst), 0, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), 5); \ - x86_imm_emit32 ((inst), (disp)); \ - } else if ((disp) == 0 && (basereg) != X86_EBP) { \ - x86_address_byte ((inst), 0, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ - } else if (x86_is_imm8((disp))) { \ - x86_address_byte ((inst), 1, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ - x86_imm_emit8 ((inst), (disp)); \ - } else { \ - x86_address_byte ((inst), 2, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), 5); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - } while (0) - -/* - * target is the position in the code where to jump to: - * target = code; - * .. output loop code... - * x86_mov_reg_imm (code, X86_EAX, 0); - * loop = code; - * x86_loop (code, -1); - * ... finish method - * - * patch displacement - * x86_patch (loop, target); - * - * ins should point at the start of the instruction that encodes a target. - * the instruction is inspected for validity and the correct displacement - * is inserted. 
- */ -#define x86_patch(ins,target) \ - do { \ - unsigned char* pos = (ins) + 1; \ - int disp, size = 0; \ - switch (*(unsigned char*)(ins)) { \ - case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \ - case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \ - ++size; ++pos; break; /* prefix for 32-bit disp */ \ - case 0xe0: case 0xe1: case 0xe2: /* loop */ \ - case 0xeb: /* jump8 */ \ - /* conditional jump opcodes */ \ - case 0x70: case 0x71: case 0x72: case 0x73: \ - case 0x74: case 0x75: case 0x76: case 0x77: \ - case 0x78: case 0x79: case 0x7a: case 0x7b: \ - case 0x7c: case 0x7d: case 0x7e: case 0x7f: \ - break; \ - default: assert (0); \ - } \ - disp = (target) - pos; \ - if (size) x86_imm_emit32 (pos, disp - 4); \ - else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \ - else assert (0); \ - } while (0) - -#define x86_breakpoint(inst) \ - do { \ - *(inst)++ = 0xcc; \ - } while (0) - -#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0) -#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0) -#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0) -#define x86_stosd(inst) x86_stosl((inst)) -#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0) -#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) -#define x86_movsd(inst) x86_movsl((inst)) - -#define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) - -#define x86_rdtsc(inst) \ - do { \ - *(inst)++ = 0x0f; \ - *(inst)++ = 0x31; \ - } while (0) - -#define x86_cmpxchg_reg_reg(inst,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0xb1; \ - x86_reg_emit ((inst), (reg), (dreg)); \ - } while (0) - -#define x86_cmpxchg_mem_reg(inst,mem,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0xb1; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0xb1; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_xchg_reg_reg(inst,dreg,reg,size) \ - do { \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0x86; \ - else \ - *(inst)++ = (unsigned char)0x87; \ - x86_reg_emit ((inst), (reg), (dreg)); \ - } while (0) - -#define x86_xchg_mem_reg(inst,mem,reg,size) \ - do { \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0x86; \ - else \ - *(inst)++ = (unsigned char)0x87; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \ - do { \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0x86; \ - else \ - *(inst)++ = (unsigned char)0x87; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_xadd_reg_reg(inst,dreg,reg,size) \ - do { \ - *(inst)++ = (unsigned char)0x0F; \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0xC0; \ - else \ - *(inst)++ = (unsigned char)0xC1; \ - x86_reg_emit ((inst), (reg), (dreg)); \ - } while (0) - -#define x86_xadd_mem_reg(inst,mem,reg,size) \ - do { \ - *(inst)++ = (unsigned char)0x0F; \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0xC0; \ - else \ - *(inst)++ = (unsigned char)0xC1; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \ - do { \ - *(inst)++ = (unsigned char)0x0F; \ - if ((size) == 1) \ - *(inst)++ = (unsigned char)0xC0; \ - else \ - *(inst)++ = (unsigned char)0xC1; \ - 
x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_inc_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_mem_emit ((inst), 0, (mem)); \ - } while (0) - -#define x86_inc_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - } while (0) - -#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0) - -#define x86_dec_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_mem_emit ((inst), 1, (mem)); \ - } while (0) - -#define x86_dec_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 1, (basereg), (disp)); \ - } while (0) - -#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0) - -#define x86_not_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_mem_emit ((inst), 2, (mem)); \ - } while (0) - -#define x86_not_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_membase_emit ((inst), 2, (basereg), (disp)); \ - } while (0) - -#define x86_not_reg(inst,reg) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_reg_emit ((inst), 2, (reg)); \ - } while (0) - -#define x86_neg_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_mem_emit ((inst), 3, (mem)); \ - } while (0) - -#define x86_neg_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_membase_emit ((inst), 3, (basereg), (disp)); \ - } while (0) - -#define x86_neg_reg(inst,reg) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_reg_emit ((inst), 3, (reg)); \ - } while (0) - -#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0) - -#define x86_alu_reg_imm(inst,opc,reg,imm) \ - do { \ - if ((reg) == X86_EAX) { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ - x86_imm_emit32 ((inst), (imm)); \ - break; \ - } \ - if (x86_is_imm8((imm))) { \ - *(inst)++ = (unsigned char)0x83; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0x81; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_alu_mem_imm(inst,opc,mem,imm) \ - do { \ - if (x86_is_imm8((imm))) { \ - *(inst)++ = (unsigned char)0x83; \ - x86_mem_emit ((inst), (opc), (mem)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0x81; \ - x86_mem_emit ((inst), (opc), (mem)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \ - do { \ - if (x86_is_imm8((imm))) { \ - *(inst)++ = (unsigned char)0x83; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0x81; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \ - do { \ - *(inst)++ = (unsigned char)0x80; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_alu_mem_reg(inst,opc,mem,reg) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define 
x86_alu_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -/** - * @x86_alu_reg8_reg8: - * Supports ALU operations between two 8-bit registers. - * dreg := dreg opc reg - * X86_Reg_No enum is used to specify the registers. - * Additionally is_*_h flags are used to specify what part - * of a given 32-bit register is used - high (TRUE) or low (FALSE). - * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH - */ -#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \ - x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \ - } while (0) - -#define x86_alu_reg_mem(inst,opc,reg,mem) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \ - do { \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_test_reg_imm(inst,reg,imm) \ - do { \ - if ((reg) == X86_EAX) { \ - *(inst)++ = (unsigned char)0xa9; \ - } else { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_reg_emit ((inst), 0, (reg)); \ - } \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_test_mem_imm(inst,mem,imm) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_mem_emit ((inst), 0, (mem)); \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_test_membase_imm(inst,basereg,disp,imm) \ - do { \ - *(inst)++ = (unsigned char)0xf7; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_test_reg_reg(inst,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0x85; \ - x86_reg_emit ((inst), (reg), (dreg)); \ - } while (0) - -#define x86_test_mem_reg(inst,mem,reg) \ - do { \ - *(inst)++ = (unsigned char)0x85; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_test_membase_reg(inst,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x85; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_shift_reg_imm(inst,opc,reg,imm) \ - do { \ - if ((imm) == 1) { \ - *(inst)++ = (unsigned char)0xd1; \ - x86_reg_emit ((inst), (opc), (reg)); \ - } else { \ - *(inst)++ = (unsigned char)0xc1; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit8 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_shift_mem_imm(inst,opc,mem,imm) \ - do { \ - if ((imm) == 1) { \ - *(inst)++ = (unsigned char)0xd1; \ - x86_mem_emit ((inst), (opc), (mem)); \ - } else { \ - *(inst)++ = (unsigned char)0xc1; \ - x86_mem_emit ((inst), (opc), (mem)); \ - x86_imm_emit8 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \ - do { \ - if ((imm) == 1) { \ - *(inst)++ = (unsigned char)0xd1; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - } else { \ - *(inst)++ = (unsigned char)0xc1; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - x86_imm_emit8 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_shift_reg(inst,opc,reg) \ - do { \ - *(inst)++ = (unsigned char)0xd3; \ - x86_reg_emit ((inst), (opc), (reg)); \ - } while (0) - -#define x86_shift_mem(inst,opc,mem) \ - do { \ - *(inst)++ = (unsigned char)0xd3; \ - x86_mem_emit ((inst), (opc), (mem)); \ - } while (0) - -#define x86_shift_membase(inst,opc,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xd3; \ - x86_membase_emit ((inst), 
(opc), (basereg), (disp)); \
- } while (0)
-
-/*
- * Multi op shift missing.
- */
-
-#define x86_shrd_reg(inst,dreg,reg) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xad; \
- x86_reg_emit ((inst), (reg), (dreg)); \
- } while (0)
-
-#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xac; \
- x86_reg_emit ((inst), (reg), (dreg)); \
- x86_imm_emit8 ((inst), (shamt)); \
- } while (0)
-
-#define x86_shld_reg(inst,dreg,reg) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xa5; \
- x86_reg_emit ((inst), (reg), (dreg)); \
- } while (0)
-
-#define x86_shld_reg_imm(inst,dreg,reg,shamt) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xa4; \
- x86_reg_emit ((inst), (reg), (dreg)); \
- x86_imm_emit8 ((inst), (shamt)); \
- } while (0)
-
-/*
- * EDX:EAX = EAX * rm
- */
-#define x86_mul_reg(inst,reg,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \
- } while (0)
-
-#define x86_mul_mem(inst,mem,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \
- } while (0)
-
-#define x86_mul_membase(inst,basereg,disp,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
- } while (0)
-
-/*
- * r *= rm
- */
-#define x86_imul_reg_reg(inst,dreg,reg) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xaf; \
- x86_reg_emit ((inst), (dreg), (reg)); \
- } while (0)
-
-#define x86_imul_reg_mem(inst,reg,mem) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xaf; \
- x86_mem_emit ((inst), (reg), (mem)); \
- } while (0)
-
-#define x86_imul_reg_membase(inst,reg,basereg,disp) \
- do { \
- *(inst)++ = (unsigned char)0x0f; \
- *(inst)++ = (unsigned char)0xaf; \
- x86_membase_emit ((inst), (reg), (basereg), (disp)); \
- } while (0)
-
-/*
- * dreg = rm * imm
- */
-#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \
- do { \
- if (x86_is_imm8 ((imm))) { \
- *(inst)++ = (unsigned char)0x6b; \
- x86_reg_emit ((inst), (dreg), (reg)); \
- x86_imm_emit8 ((inst), (imm)); \
- } else { \
- *(inst)++ = (unsigned char)0x69; \
- x86_reg_emit ((inst), (dreg), (reg)); \
- x86_imm_emit32 ((inst), (imm)); \
- } \
- } while (0)
-
-#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \
- do { \
- if (x86_is_imm8 ((imm))) { \
- *(inst)++ = (unsigned char)0x6b; \
- x86_mem_emit ((inst), (reg), (mem)); \
- x86_imm_emit8 ((inst), (imm)); \
- } else { \
- *(inst)++ = (unsigned char)0x69; \
- x86_mem_emit ((inst), (reg), (mem)); \
- x86_imm_emit32 ((inst), (imm)); \
- } \
- } while (0)
-
-#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \
- do { \
- if (x86_is_imm8 ((imm))) { \
- *(inst)++ = (unsigned char)0x6b; \
- x86_membase_emit ((inst), (reg), (basereg), (disp)); \
- x86_imm_emit8 ((inst), (imm)); \
- } else { \
- *(inst)++ = (unsigned char)0x69; \
- x86_membase_emit ((inst), (reg), (basereg), (disp)); \
- x86_imm_emit32 ((inst), (imm)); \
- } \
- } while (0)
-
-/*
- * divide EDX:EAX by rm;
- * eax = quotient, edx = remainder
- */
-
-#define x86_div_reg(inst,reg,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \
- } while (0)
-
-#define x86_div_mem(inst,mem,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \
- } while (0)
-
-#define x86_div_membase(inst,basereg,disp,is_signed) \
- do { \
- *(inst)++ = (unsigned char)0xf7; \
- x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
- } while (0)
-
-#define x86_mov_mem_reg(inst,mem,reg,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x88; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x89; break; \
- default: assert (0); \
- } \
- x86_mem_emit ((inst), (reg), (mem)); \
- } while (0)
-
-#define x86_mov_regp_reg(inst,regp,reg,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x88; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x89; break; \
- default: assert (0); \
- } \
- x86_regp_emit ((inst), (reg), (regp)); \
- } while (0)
-
-#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x88; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x89; break; \
- default: assert (0); \
- } \
- x86_membase_emit ((inst), (reg), (basereg), (disp)); \
- } while (0)
-
-#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x88; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x89; break; \
- default: assert (0); \
- } \
- x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
- } while (0)
-
-#define x86_mov_reg_reg(inst,dreg,reg,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x8a; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x8b; break; \
- default: assert (0); \
- } \
- x86_reg_emit ((inst), (dreg), (reg)); \
- } while (0)
-
-#define x86_mov_reg_mem(inst,reg,mem,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x8a; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x8b; break; \
- default: assert (0); \
- } \
- x86_mem_emit ((inst), (reg), (mem)); \
- } while (0)
-
-#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x8a; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x8b; break; \
- default: assert (0); \
- } \
- x86_membase_emit ((inst), (reg), (basereg), (disp)); \
- } while (0)
-
-#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \
- do { \
- switch ((size)) { \
- case 1: *(inst)++ = (unsigned char)0x8a; break; \
- case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \
- case 4: *(inst)++ = (unsigned char)0x8b; break; \
- default: assert (0); \
- } \
- x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
- } while (0)
-
-/*
- * Note: x86_clear_reg () changes the condition code!
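- * (It expands to a xor of the register with itself, so do not emit it
- * between a compare/test and the conditional branch or set that
- * consumes the flags.)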
- */ -#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg)) - -#define x86_mov_reg_imm(inst,reg,imm) \ - do { \ - *(inst)++ = (unsigned char)0xb8 + (reg); \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_mov_mem_imm(inst,mem,imm,size) \ - do { \ - if ((size) == 1) { \ - *(inst)++ = (unsigned char)0xc6; \ - x86_mem_emit ((inst), 0, (mem)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((size) == 2) { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0xc7; \ - x86_mem_emit ((inst), 0, (mem)); \ - x86_imm_emit16 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0xc7; \ - x86_mem_emit ((inst), 0, (mem)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \ - do { \ - if ((size) == 1) { \ - *(inst)++ = (unsigned char)0xc6; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((size) == 2) { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - x86_imm_emit16 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \ - do { \ - if ((size) == 1) { \ - *(inst)++ = (unsigned char)0xc6; \ - x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((size) == 2) { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0xc7; \ - x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ - x86_imm_emit16 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0xc7; \ - x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_lea_mem(inst,reg,mem) \ - do { \ - *(inst)++ = (unsigned char)0x8d; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_lea_membase(inst,reg,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0x8d; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \ - do { \ - *(inst)++ = (unsigned char)0x8d; \ - x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ - } while (0) - -#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \ - do { \ - unsigned char op = 0xb6; \ - g_assert (is_half || X86_IS_BYTE_REG (reg)); \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) op += 0x08; \ - if ((is_half)) op += 0x01; \ - *(inst)++ = op; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \ - do { \ - unsigned char op = 0xb6; \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) op += 0x08; \ - if ((is_half)) op += 0x01; \ - *(inst)++ = op; \ - x86_mem_emit ((inst), (dreg), (mem)); \ - } while (0) - -#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \ - do { \ - unsigned char op = 0xb6; \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) op += 0x08; \ - if ((is_half)) op += 0x01; \ - *(inst)++ = op; \ - x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ - } while (0) - -#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \ - do { \ - unsigned char op = 0xb6; \ - *(inst)++ = (unsigned char)0x0f; \ - if 
((is_signed)) op += 0x08; \ - if ((is_half)) op += 0x01; \ - *(inst)++ = op; \ - x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \ - } while (0) - -#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0) -#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0) - -#define x86_fp_op_mem(inst,opc,mem,is_double) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ - x86_mem_emit ((inst), (opc), (mem)); \ - } while (0) - -#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ - x86_membase_emit ((inst), (opc), (basereg), (disp)); \ - } while (0) - -#define x86_fp_op(inst,opc,index) \ - do { \ - *(inst)++ = (unsigned char)0xd8; \ - *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \ - } while (0) - -#define x86_fp_op_reg(inst,opc,index,pop_stack) \ - do { \ - static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \ - *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \ - *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \ - } while (0) - -/** - * @x86_fp_int_op_membase - * Supports FPU operations between ST(0) and integer operand in memory. - * Operation encoded using X86_FP_Opcode enum. - * Operand is addressed by [basereg + disp]. - * is_int specifies whether operand is int32 (TRUE) or int16 (FALSE). - */ -#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \ - do { \ - *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \ - x86_membase_emit ((inst), opc, (basereg), (disp)); \ - } while (0) - -#define x86_fstp(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xdd; \ - *(inst)++ = (unsigned char)0xd8+(index); \ - } while (0) - -#define x86_fcompp(inst) \ - do { \ - *(inst)++ = (unsigned char)0xde; \ - *(inst)++ = (unsigned char)0xd9; \ - } while (0) - -#define x86_fucompp(inst) \ - do { \ - *(inst)++ = (unsigned char)0xda; \ - *(inst)++ = (unsigned char)0xe9; \ - } while (0) - -#define x86_fnstsw(inst) \ - do { \ - *(inst)++ = (unsigned char)0xdf; \ - *(inst)++ = (unsigned char)0xe0; \ - } while (0) - -#define x86_fnstcw(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - x86_mem_emit ((inst), 7, (mem)); \ - } while (0) - -#define x86_fnstcw_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - x86_membase_emit ((inst), 7, (basereg), (disp)); \ - } while (0) - -#define x86_fldcw(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - x86_mem_emit ((inst), 5, (mem)); \ - } while (0) - -#define x86_fldcw_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - x86_membase_emit ((inst), 5, (basereg), (disp)); \ - } while (0) - -#define x86_fchs(inst) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xe0; \ - } while (0) - -#define x86_frem(inst) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xf8; \ - } while (0) - -#define x86_fxch(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \ - } while (0) - -#define x86_fcomi(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xdb; \ - *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ - } while (0) - -#define x86_fcomip(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xdf; \ - *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ - } while (0) - -#define x86_fucomi(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xdb; 
\ - *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ - } while (0) - -#define x86_fucomip(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xdf; \ - *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ - } while (0) - -#define x86_fld(inst,mem,is_double) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ - x86_mem_emit ((inst), 0, (mem)); \ - } while (0) - -#define x86_fld_membase(inst,basereg,disp,is_double) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - } while (0) - -#define x86_fld80_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xdb; \ - x86_mem_emit ((inst), 5, (mem)); \ - } while (0) - -#define x86_fld80_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xdb; \ - x86_membase_emit ((inst), 5, (basereg), (disp)); \ - } while (0) - -#define x86_fild(inst,mem,is_long) \ - do { \ - if ((is_long)) { \ - *(inst)++ = (unsigned char)0xdf; \ - x86_mem_emit ((inst), 5, (mem)); \ - } else { \ - *(inst)++ = (unsigned char)0xdb; \ - x86_mem_emit ((inst), 0, (mem)); \ - } \ - } while (0) - -#define x86_fild_membase(inst,basereg,disp,is_long) \ - do { \ - if ((is_long)) { \ - *(inst)++ = (unsigned char)0xdf; \ - x86_membase_emit ((inst), 5, (basereg), (disp)); \ - } else { \ - *(inst)++ = (unsigned char)0xdb; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - } \ - } while (0) - -#define x86_fld_reg(inst,index) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \ - } while (0) - -#define x86_fldz(inst) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xee; \ - } while (0) - -#define x86_fld1(inst) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xe8; \ - } while (0) - -#define x86_fldpi(inst) \ - do { \ - *(inst)++ = (unsigned char)0xd9; \ - *(inst)++ = (unsigned char)0xeb; \ - } while (0) - -#define x86_fst(inst,mem,is_double,pop_stack) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ - x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \ - } while (0) - -#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \ - do { \ - *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ - x86_membase_emit ((inst), 2 + ((pop_stack) ? 
1 : 0), (basereg), (disp)); \
- } while (0)
-
-#define x86_fst80_mem(inst,mem) \
- do { \
- *(inst)++ = (unsigned char)0xdb; \
- x86_mem_emit ((inst), 7, (mem)); \
- } while (0)
-
-
-#define x86_fst80_membase(inst,basereg,disp) \
- do { \
- *(inst)++ = (unsigned char)0xdb; \
- x86_membase_emit ((inst), 7, (basereg), (disp)); \
- } while (0)
-
-
-#define x86_fist_pop(inst,mem,is_long) \
- do { \
- if ((is_long)) { \
- *(inst)++ = (unsigned char)0xdf; \
- x86_mem_emit ((inst), 7, (mem)); \
- } else { \
- *(inst)++ = (unsigned char)0xdb; \
- x86_mem_emit ((inst), 3, (mem)); \
- } \
- } while (0)
-
-#define x86_fist_pop_membase(inst,basereg,disp,is_long) \
- do { \
- if ((is_long)) { \
- *(inst)++ = (unsigned char)0xdf; \
- x86_membase_emit ((inst), 7, (basereg), (disp)); \
- } else { \
- *(inst)++ = (unsigned char)0xdb; \
- x86_membase_emit ((inst), 3, (basereg), (disp)); \
- } \
- } while (0)
-
-#define x86_fstsw(inst) \
- do { \
- *(inst)++ = (unsigned char)0x9b; \
- *(inst)++ = (unsigned char)0xdf; \
- *(inst)++ = (unsigned char)0xe0; \
- } while (0)
-
-/**
- * @x86_fist_membase
- * Converts content of ST(0) to integer and stores it at memory location
- * addressed by [basereg + disp].
- * is_int specifies whether destination is int32 (TRUE) or int16 (FALSE).
- */
-#define x86_fist_membase(inst,basereg,disp,is_int) \
- do { \
- if ((is_int)) { \
- *(inst)++ = (unsigned char)0xdb; \
- x86_membase_emit ((inst), 2, (basereg), (disp)); \
- } else { \
- *(inst)++ = (unsigned char)0xdf; \
- x86_membase_emit ((inst), 2, (basereg), (disp)); \
- } \
- } while (0)
-
-
-#define x86_push_reg(inst,reg) \
- do { \
- *(inst)++ = (unsigned char)0x50 + (reg); \
- } while (0)
-
-#define x86_push_regp(inst,reg) \
- do { \
- *(inst)++ = (unsigned char)0xff; \
- x86_regp_emit ((inst), 6, (reg)); \
- } while (0)
-
-#define x86_push_mem(inst,mem) \
- do { \
- *(inst)++ = (unsigned char)0xff; \
- x86_mem_emit ((inst), 6, (mem)); \
- } while (0)
-
-#define x86_push_membase(inst,basereg,disp) \
- do { \
- *(inst)++ = (unsigned char)0xff; \
- x86_membase_emit ((inst), 6, (basereg), (disp)); \
- } while (0)
-
-#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \
- do { \
- *(inst)++ = (unsigned char)0xff; \
- x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \
- } while (0)
-
-#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0)
-
-#define x86_push_imm(inst,imm) \
- do { \
- int _imm = (int) (imm); \
- if (x86_is_imm8 (_imm)) { \
- *(inst)++ = (unsigned char)0x6A; \
- x86_imm_emit8 ((inst), (_imm)); \
- } else { \
- *(inst)++ = (unsigned char)0x68; \
- x86_imm_emit32 ((inst), (_imm)); \
- } \
- } while (0)
-
-#define x86_pop_reg(inst,reg) \
- do { \
- *(inst)++ = (unsigned char)0x58 + (reg); \
- } while (0)
-
-#define x86_pop_mem(inst,mem) \
- do { \
- *(inst)++ = (unsigned char)0x8f; \
- x86_mem_emit ((inst), 0, (mem)); \
- } while (0)
-
-#define x86_pop_membase(inst,basereg,disp) \
- do { \
- *(inst)++ = (unsigned char)0x8f; \
- x86_membase_emit ((inst), 0, (basereg), (disp)); \
- } while (0)
-
-#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0)
-#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0)
-#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0)
-#define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0)
-
-#define x86_loop(inst,imm) \
- do { \
- *(inst)++ = (unsigned char)0xe2; \
- x86_imm_emit8 ((inst), (imm)); \
- } while (0)
-
-#define x86_loope(inst,imm) \
- do { \
- *(inst)++ =
(unsigned char)0xe1; \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_loopne(inst,imm) \ - do { \ - *(inst)++ = (unsigned char)0xe0; \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_jump32(inst,imm) \ - do { \ - *(inst)++ = (unsigned char)0xe9; \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_jump8(inst,imm) \ - do { \ - *(inst)++ = (unsigned char)0xeb; \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_jump_reg(inst,reg) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst), 4, (reg)); \ - } while (0) - -#define x86_jump_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_mem_emit ((inst), 4, (mem)); \ - } while (0) - -#define x86_jump_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 4, (basereg), (disp)); \ - } while (0) - -/* - * target is a pointer in our buffer. - */ -#define x86_jump_code(inst,target) \ - do { \ - int t = (unsigned char*)(target) - (inst) - 2; \ - if (x86_is_imm8(t)) { \ - x86_jump8 ((inst), t); \ - } else { \ - t -= 3; \ - x86_jump32 ((inst), t); \ - } \ - } while (0) - -#define x86_jump_disp(inst,disp) \ - do { \ - int t = (disp) - 2; \ - if (x86_is_imm8(t)) { \ - x86_jump8 ((inst), t); \ - } else { \ - t -= 3; \ - x86_jump32 ((inst), t); \ - } \ - } while (0) - -#define x86_branch8(inst,cond,imm,is_signed) \ - do { \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)]; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)]; \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_branch32(inst,cond,imm,is_signed) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ - x86_imm_emit32 ((inst), (imm)); \ - } while (0) - -#define x86_branch(inst,cond,target,is_signed) \ - do { \ - int offset = (target) - (inst) - 2; \ - if (x86_is_imm8 ((offset))) \ - x86_branch8 ((inst), (cond), offset, (is_signed)); \ - else { \ - offset -= 4; \ - x86_branch32 ((inst), (cond), offset, (is_signed)); \ - } \ - } while (0) - -#define x86_branch_disp(inst,cond,disp,is_signed) \ - do { \ - int offset = (disp) - 2; \ - if (x86_is_imm8 ((offset))) \ - x86_branch8 ((inst), (cond), offset, (is_signed)); \ - else { \ - offset -= 4; \ - x86_branch32 ((inst), (cond), offset, (is_signed)); \ - } \ - } while (0) - -#define x86_set_reg(inst,cond,reg,is_signed) \ - do { \ - g_assert (X86_IS_BYTE_REG (reg)); \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ - x86_reg_emit ((inst), 0, (reg)); \ - } while (0) - -#define x86_set_mem(inst,cond,mem,is_signed) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ - x86_mem_emit ((inst), 0, (mem)); \ - } while (0) - -#define x86_set_membase(inst,cond,basereg,disp,is_signed) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \ - x86_membase_emit ((inst), 0, (basereg), (disp)); \ - } while (0) - -#define x86_call_imm(inst,disp) \ - do { \ - *(inst)++ = (unsigned char)0xe8; \ - x86_imm_emit32 ((inst), (int)(disp)); \ - } while (0) - -#define x86_call_reg(inst,reg) \ - do { \ - *(inst)++ = 
(unsigned char)0xff; \ - x86_reg_emit ((inst), 2, (reg)); \ - } while (0) - -#define x86_call_mem(inst,mem) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_mem_emit ((inst), 2, (mem)); \ - } while (0) - -#define x86_call_membase(inst,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 2, (basereg), (disp)); \ - } while (0) - -#define x86_call_code(inst,target) \ - do { \ - int _x86_offset = (unsigned char*)(target) - (inst); \ - _x86_offset -= 5; \ - x86_call_imm ((inst), _x86_offset); \ - } while (0) - -#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) - -#define x86_ret_imm(inst,imm) \ - do { \ - if ((imm) == 0) { \ - x86_ret ((inst)); \ - } else { \ - *(inst)++ = (unsigned char)0xc2; \ - x86_imm_emit16 ((inst), (imm)); \ - } \ - } while (0) - -#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char) 0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#define x86_cmov_mem(inst,cond,is_signed,reg,mem) \ - do { \ - *(inst)++ = (unsigned char) 0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ - x86_mem_emit ((inst), (reg), (mem)); \ - } while (0) - -#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char) 0x0f; \ - if ((is_signed)) \ - *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ - else \ - *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_enter(inst,framesize) \ - do { \ - *(inst)++ = (unsigned char)0xc8; \ - x86_imm_emit16 ((inst), (framesize)); \ - *(inst)++ = 0; \ - } while (0) - -#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) -#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0) - -#define x86_fsin(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) -#define x86_fcos(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) -#define x86_fabs(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) -#define x86_ftst(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0) -#define x86_fxam(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0) -#define x86_fpatan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) -#define x86_fprem(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) -#define x86_fprem1(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0) -#define x86_frndint(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0) -#define x86_fsqrt(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0) -#define x86_fptan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0) - -#define x86_padding(inst,size) \ - do { \ - switch ((size)) { \ - case 1: x86_nop ((inst)); break; \ - case 2: *(inst)++ = 0x8b; \ - *(inst)++ = 0xc0; break; \ - case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \ - *(inst)++ = 0x00; break; \ - case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ - *(inst)++ = 0x24; *(inst)++ = 0x00; \ - 
break; \ - case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \ - *(inst)++ = 0x24; *(inst)++ = 0x00; \ - x86_nop ((inst)); break; \ - case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \ - *(inst)++ = 0x00; *(inst)++ = 0x00; \ - *(inst)++ = 0x00; *(inst)++ = 0x00; \ - break; \ - case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \ - *(inst)++ = 0x24; *(inst)++ = 0x00; \ - *(inst)++ = 0x00; *(inst)++ = 0x00; \ - *(inst)++ = 0x00; break; \ - default: assert (0); \ - } \ - } while (0) - -#define x86_prolog(inst,frame_size,reg_mask) \ - do { \ - unsigned i, m = 1; \ - x86_enter ((inst), (frame_size)); \ - for (i = 0; i < X86_NREG; ++i, m <<= 1) { \ - if ((reg_mask) & m) \ - x86_push_reg ((inst), i); \ - } \ - } while (0) - -#define x86_epilog(inst,reg_mask) \ - do { \ - unsigned i, m = 1 << X86_EDI; \ - for (i = X86_EDI; m != 0; i--, m=m>>1) { \ - if ((reg_mask) & m) \ - x86_pop_reg ((inst), i); \ - } \ - x86_leave ((inst)); \ - x86_ret ((inst)); \ - } while (0) - - -typedef enum { - X86_SSE_SQRT = 0x51, - X86_SSE_RSQRT = 0x52, - X86_SSE_ADD = 0x58, - X86_SSE_DIV = 0x5E, - X86_SSE_MUL = 0x59, - X86_SSE_SUB = 0x5C, - X86_SSE_MIN = 0x5D, - X86_SSE_MAX = 0x5F, - - X86_SSE_ADDSUB = 0xD0, - X86_SSE_HADD = 0x7C, - X86_SSE_HSUB = 0x7D, - - X86_SSE_PAND = 0xDB, - X86_SSE_POR = 0xEB, - X86_SSE_PXOR = 0xEF, - - X86_SSE_PADDB = 0xFC, - X86_SSE_PADDW = 0xFD, - X86_SSE_PADDD = 0xFE, - - X86_SSE_PSUBB = 0xF8, - X86_SSE_PSUBW = 0xF9, - X86_SSE_PSUBD = 0xFA, - - X86_SSE_PUNPCKLBW = 0x60, - X86_SSE_PUNPCKLWD = 0x61, - X86_SSE_PUNPCKLDQ = 0x62, - X86_SSE_PUNPCKLQDQ = 0x6C, - - X86_SSE_PUNPCKHBW = 0x68, - X86_SSE_PUNPCKHWD = 0x69, - X86_SSE_PUNPCKHDQ = 0x6A, - X86_SSE_PUNPCKHQDQ = 0x6D, - - X86_SSE_PACKUSWB = 0x67, - X86_SSE_PACKUSDW = 0x2B,/*sse41*/ - - X86_SSE_PADDUSB = 0xDC, - X86_SSE_PADDUSW = 0xDD, - X86_SSE_PSUBUSB = 0xD8, - X86_SSE_PSUBUSW = 0xD9, - - X86_SSE_PMULLW = 0xD5, - X86_SSE_PMULLD = 0x40,/*sse41*/ - - X86_SSE_PSHIFTW = 0x71, - X86_SSE_PSHIFTD = 0x72, - X86_SSE_SHR = 2, - X86_SSE_SAR = 4, - X86_SSE_SHL = 6, - - X86_SSE_PSRLW_REG = 0xD1, - X86_SSE_PSRAW_REG = 0xE1, - X86_SSE_PSLLW_REG = 0xF1, - - X86_SSE_PSRLD_REG = 0xD2, - X86_SSE_PSRAD_REG = 0xE2, - X86_SSE_PSLLD_REG = 0xF2, - -} X86_SSE_Opcode; - - -/* minimal SSE* support */ -#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0xf2; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (dreg), (basereg), (disp)); \ - } while (0) - -#define x86_cvttsd2si(inst,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0xf2; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x2c; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0F; \ - *(inst)++ = (unsigned char)(opc); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ - } while (0) - -#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ - do { \ - x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ - } while (0) - -#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0xF2; \ - x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ - } while (0) - -#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0xF3; \ - x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ - } while (0) - 
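-/*
- * Editor's sketch (illustrative, not part of the original header): the
- * x86_sse_alu_*_reg_reg helpers above differ only in the mandatory
- * prefix emitted before the 0x0F <opc> pair; XMM registers are passed
- * as plain indices (0-7). For example, with X86_SSE_ADD (0x58):
- *
- * x86_sse_alu_ps_reg_reg (p, X86_SSE_ADD, 0, 1); no prefix: addps %xmm1, %xmm0
- * x86_sse_alu_pd_reg_reg (p, X86_SSE_ADD, 0, 1); 0x66 prefix: addpd
- * x86_sse_alu_sd_reg_reg (p, X86_SSE_ADD, 0, 1); 0xF2 prefix: addsd
- * x86_sse_alu_ss_reg_reg (p, X86_SSE_ADD, 0, 1); 0xF3 prefix: addss
- */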
-#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0x0F; \ - *(inst)++ = (unsigned char)0x38; \ - *(inst)++ = (unsigned char)(opc); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - - -#define x86_movups_reg_membase(inst,sreg,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ - } while (0) - -#define x86_movups_membase_reg(inst,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x28; \ - x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ - } while (0) - -#define x86_movaps_membase_reg(inst,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x29; \ - x86_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define x86_movaps_reg_reg(inst,dreg,sreg) \ - do { \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x28; \ - x86_reg_emit ((inst), (dreg), (sreg)); \ - } while (0) - - -#define x86_movd_reg_xreg(inst,dreg,sreg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x7e; \ - x86_reg_emit ((inst), (sreg), (dreg)); \ - } while (0) - -#define x86_movd_xreg_reg(inst,dreg,sreg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x6e; \ - x86_reg_emit ((inst), (dreg), (sreg)); \ - } while (0) - -#define x86_pshufd_reg_reg(inst,dreg,sreg,mask) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x70; \ - x86_reg_emit ((inst), (dreg), (sreg)); \ - *(inst)++ = (unsigned char)mask; \ - } while (0) - -#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \ - do { \ - x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \ - x86_imm_emit8 ((inst), (imm)); \ - } while (0) - -#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \ - do { \ - x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \ - } while (0) - -#endif // X86_H - -- cgit v1.1 From 2ffed07a8205616ea4a1605338f08c8ad6c77432 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 24 Oct 2008 13:36:53 +0000 Subject: 2008-10-24 Rodrigo Kumpera * x86/x86-codegen.h: Add signed packed mul high. svn path=/trunk/mono/; revision=116936 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 1 + 2 files changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index 9db33d3..7a6b48b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-24 Rodrigo Kumpera + + * x86/x86-codegen.h: Add signed packed mul high. + 2008-10-23 Rodrigo Kumpera * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 26c03a3..c7816c5 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1801,6 +1801,7 @@ typedef enum { X86_SSE_PMULLW = 0xD5, X86_SSE_PMULLD = 0x40,/*sse41*/ X86_SSE_PMULHUW = 0xE4, + X86_SSE_PMULHW = 0xE5, X86_SSE_PMOVMSKB = 0xD7, -- cgit v1.1 From bf9bec59fad96b9a7cb38921c26bb1c176fe40ce Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 24 Oct 2008 21:58:17 +0000 Subject: 2008-10-24 Rodrigo Kumpera * x86/x86-codegen.h: Add signed pack with saturation. 
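[Editor's note, illustrative: the X86_SSE_PACKSSWB/X86_SSE_PACKSSDW constants added below are ordinary 66 0F-prefixed opcodes, so they work through the existing helper, e.g. x86_sse_alu_pd_reg_reg (p, X86_SSE_PACKSSWB, 0, 1) assembles to packsswb %xmm1, %xmm0 (66 0F 63 /r).]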
svn path=/trunk/mono/; revision=116995 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 3 +++ 2 files changed, 7 insertions(+) diff --git a/ChangeLog b/ChangeLog index 7a6b48b..c47e0ed 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,9 @@ 2008-10-24 Rodrigo Kumpera + * x86/x86-codegen.h: Add signed pack with saturation. + +2008-10-24 Rodrigo Kumpera + * x86/x86-codegen.h: Add signed packed mul high. 2008-10-23 Rodrigo Kumpera diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index c7816c5..9c83b2f 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1785,6 +1785,9 @@ typedef enum { X86_SSE_PUNPCKHDQ = 0x6A, X86_SSE_PUNPCKHQDQ = 0x6D, + X86_SSE_PACKSSWB = 0x63, + X86_SSE_PACKSSDW = 0x6B, + X86_SSE_PACKUSWB = 0x67, X86_SSE_PACKUSDW = 0x2B,/*sse41*/ -- cgit v1.1 From 3fffcb4ac5879f2655ee3b4b3bee093a9eaa5016 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 28 Oct 2008 00:05:56 +0000 Subject: 2008-10-27 Rodrigo Kumpera * x86/x86-codegen.h: Add movddup. svn path=/trunk/mono/; revision=117220 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index c47e0ed..23a67a8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-10-27 Rodrigo Kumpera + + * x86/x86-codegen.h: Add movddup. + 2008-10-24 Rodrigo Kumpera * x86/x86-codegen.h: Add signed pack with saturation. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 9c83b2f..6c977c8 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1731,6 +1731,7 @@ typedef enum { X86_SSE_HSUB = 0x7D, X86_SSE_MOVSHDUP = 0x16, X86_SSE_MOVSLDUP = 0x12, + X86_SSE_MOVDDUP = 0x12, X86_SSE_PAND = 0xDB, X86_SSE_POR = 0xEB, @@ -1878,6 +1879,12 @@ typedef enum { *(inst)++ = (unsigned char)imm; \ } while (0) +#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \ + do { \ + x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \ + *(inst)++ = (unsigned char)(imm); \ + } while (0) + #define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x66; \ -- cgit v1.1 From eaf2804839ffb61912a8eeef7c3a58463aafcdd6 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 28 Oct 2008 19:24:34 +0000 Subject: 2008-10-28 Rodrigo Kumpera * x86/x86-codegen.h: Add long version of the packed integer ops. svn path=/trunk/mono/; revision=117292 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/ChangeLog b/ChangeLog index 23a67a8..ddc1ca2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-10-28 Rodrigo Kumpera + + * x86/x86-codegen.h: Add long version of the packed integer + ops. + 2008-10-27 Rodrigo Kumpera * x86/x86-codegen.h: Add movddup. 
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6c977c8..bccadab 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1740,10 +1740,12 @@ typedef enum { X86_SSE_PADDB = 0xFC, X86_SSE_PADDW = 0xFD, X86_SSE_PADDD = 0xFE, + X86_SSE_PADDQ = 0xD4, X86_SSE_PSUBB = 0xF8, X86_SSE_PSUBW = 0xF9, X86_SSE_PSUBD = 0xFA, + X86_SSE_PSUBQ = 0xFB, X86_SSE_PMAXSB = 0x3C, /*sse41*/ X86_SSE_PMAXSW = 0xEE, @@ -1767,10 +1769,12 @@ typedef enum { X86_SSE_PCMPEQB = 0x74, X86_SSE_PCMPEQW = 0x75, X86_SSE_PCMPEQD = 0x76, + X86_SSE_PCMPEQQ = 0x29, /*sse41*/ X86_SSE_PCMPGTB = 0x64, X86_SSE_PCMPGTW = 0x65, X86_SSE_PCMPGTD = 0x66, + X86_SSE_PCMPGTQ = 0x37, /*sse41*/ X86_SSE_PSADBW = 0xf6, @@ -1806,11 +1810,13 @@ typedef enum { X86_SSE_PMULLD = 0x40,/*sse41*/ X86_SSE_PMULHUW = 0xE4, X86_SSE_PMULHW = 0xE5, + X86_SSE_PMULUDQ = 0xF4, X86_SSE_PMOVMSKB = 0xD7, X86_SSE_PSHIFTW = 0x71, X86_SSE_PSHIFTD = 0x72, + X86_SSE_PSHIFTQ = 0x73, X86_SSE_SHR = 2, X86_SSE_SAR = 4, X86_SSE_SHL = 6, @@ -1823,6 +1829,8 @@ typedef enum { X86_SSE_PSRAD_REG = 0xE2, X86_SSE_PSLLD_REG = 0xF2, + X86_SSE_PSRLQ_REG = 0xD3, + X86_SSE_PSLLQ_REG = 0xF3, } X86_SSE_Opcode; -- cgit v1.1 From 42f47d048391da1619aa26b70e54980c4c33e3f2 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 3 Nov 2008 14:41:44 +0000 Subject: 2008-11-03 Rodrigo Kumpera * x86/x86-codegen.h: Add prefetch instruction and x86_sse_alu_reg_membase macro. svn path=/trunk/mono/; revision=117753 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/ChangeLog b/ChangeLog index ddc1ca2..ec23e10 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-11-03 Rodrigo Kumpera + + * x86/x86-codegen.h: Add prefetch instruction + and x86_sse_alu_reg_membase macro. + 2008-10-28 Rodrigo Kumpera * x86/x86-codegen.h: Add long version of the packed integer diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index bccadab..771e689 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1831,6 +1831,8 @@ typedef enum { X86_SSE_PSRLQ_REG = 0xD3, X86_SSE_PSLLQ_REG = 0xF3, + + X86_SSE_PREFETCH = 0x18 } X86_SSE_Opcode; @@ -1858,6 +1860,13 @@ typedef enum { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_reg_membase(inst,opc,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)(opc); \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x66; \ -- cgit v1.1 From bfe79f71f1352fbbfb696de3b0c093562b6fefb5 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 4 Nov 2008 20:17:31 +0000 Subject: 2008-11-04 Rodrigo Kumpera * x86/x86-codegen.h: Add store nta. svn path=/trunk/mono/; revision=117921 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index ec23e10..560e543 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-11-04 Rodrigo Kumpera + + * x86/x86-codegen.h: Add store nta. 
+ 2008-11-03 Rodrigo Kumpera * x86/x86-codegen.h: Add prefetch instruction diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 771e689..1a77bdc 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1832,7 +1832,8 @@ typedef enum { X86_SSE_PSRLQ_REG = 0xD3, X86_SSE_PSLLQ_REG = 0xF3, - X86_SSE_PREFETCH = 0x18 + X86_SSE_PREFETCH = 0x18, + X86_SSE_MOVNTPS = 0x2B } X86_SSE_Opcode; -- cgit v1.1 From 6c930cb35aa08e10abba989d9cb8560b4636ba73 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 13 Nov 2008 22:51:27 +0000 Subject: 2008-11-13 Rodrigo Kumpera * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg. svn path=/trunk/mono/; revision=118779 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 9 --------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/ChangeLog b/ChangeLog index 560e543..f0a5037 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-11-13 Rodrigo Kumpera + + * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg. + 2008-11-04 Rodrigo Kumpera * x86/x86-codegen.h: Add store nta. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 1a77bdc..b2da269 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1965,15 +1965,6 @@ typedef enum { x86_reg_emit ((inst), (dreg), (sreg)); \ } while (0) -#define x86_pshufd_reg_reg(inst,dreg,sreg,mask) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x70; \ - x86_reg_emit ((inst), (dreg), (sreg)); \ - *(inst)++ = (unsigned char)mask; \ - } while (0) - #define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \ do { \ *(inst)++ = (unsigned char)(high_words) ? 0xF3 : 0xF2; \ -- cgit v1.1 From 59483983e37bb55af19f4e98e3de2f1ad216989b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20F=C3=A4rber?= Date: Sat, 15 Nov 2008 10:59:47 +0000 Subject: 2008-11-15 Andreas Faerber * ppc/test.c: Add support for Mac OS X. This commit is licensed under the MIT X11 license. svn path=/trunk/mono/; revision=118924 --- ChangeLog | 6 ++++++ ppc/test.c | 7 ++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index f0a5037..f6e8d9e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2008-11-15 Andreas Faerber + + * ppc/test.c: Add support for Mac OS X. + + Code is contributed under MIT/X11 license. + 2008-11-13 Rodrigo Kumpera * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg. diff --git a/ppc/test.c b/ppc/test.c index f80e5bb..c19358d 100644 --- a/ppc/test.c +++ b/ppc/test.c @@ -3,6 +3,7 @@ /* don't run the resulting program, it will destroy your computer, * just objdump -d it to inspect we generated the correct assembler. + * On Mac OS X use otool[64] -v -t */ int main() { @@ -10,7 +11,11 @@ int main() { guint8 *p = code; guint8 *cp; - printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); + printf (".text\n.align 4\n.globl main\n"); +#ifndef __APPLE__ + printf (".type main,@function\n"); +#endif + printf ("main:\n"); ppc_stwu (p, ppc_r1, -32, ppc_r1); ppc_mflr (p, ppc_r0); -- cgit v1.1 From 74b70bd5f7bc3b40a919c6c8b06c22facae8df6b Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 17 Nov 2008 17:00:22 +0000 Subject: 2008-11-17 Rodrigo Kumpera * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros. 
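[Editor's note, illustrative: with these macros a high-quadword store can be emitted as x86_sse_alu_pd_membase_reg (p, X86_SSE_MOVHPD_MEMBASE_REG, X86_ESP, -8, 0), which assembles to movhpd %xmm0, -8(%esp) (66 0F 17 /r).]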
svn path=/trunk/mono/; revision=119057 --- ChangeLog | 5 +++++ x86/x86-codegen.h | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index f6e8d9e..e1da489 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-11-17 Rodrigo Kumpera + + * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant + and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros. + 2008-11-15 Andreas Faerber * ppc/test.c: Add support for Mac OS X. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index b2da269..7ca0cac 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1833,7 +1833,8 @@ typedef enum { X86_SSE_PSLLQ_REG = 0xF3, X86_SSE_PREFETCH = 0x18, - X86_SSE_MOVNTPS = 0x2B + X86_SSE_MOVNTPS = 0x2B, + X86_SSE_MOVHPD_MEMBASE_REG = 0x17 } X86_SSE_Opcode; @@ -1868,6 +1869,13 @@ typedef enum { x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ } while (0) +#define x86_sse_alu_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)(opc); \ + x86_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x66; \ @@ -1912,6 +1920,11 @@ typedef enum { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ + } while (0) #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ do { \ -- cgit v1.1 From 484dbedc8136e413a77ee11938d40e713cfefcfd Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Tue, 18 Nov 2008 10:17:36 +0000 Subject: 2008-11-18 Mark Probst * ppc/ppc-codegen.h: A few fixes and additions. Based on code submitted by andreas.faerber@web.de at https://bugzilla.novell.com/show_bug.cgi?id=324134 under the X11/MIT license. svn path=/trunk/mono/; revision=119140 --- ChangeLog | 8 ++++++++ ppc/ppc-codegen.h | 36 ++++++++++++++++++++++++++---------- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/ChangeLog b/ChangeLog index e1da489..17f1339 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2008-11-18 Mark Probst + + * ppc/ppc-codegen.h: A few fixes and additions. + + Based on code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. 
+ 2008-11-17 Rodrigo Kumpera * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 9382fd9..087249f 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -1,5 +1,11 @@ /* + Authors: + Radek Doulik + Christopher Taylor + Andreas Faerber + Copyright (C) 2001 Radek Doulik + Copyright (C) 2007-2008 Andreas Faerber for testing do the following: ./test | as -o test.o */ @@ -134,8 +140,8 @@ enum { #define ppc_break(c) ppc_tw((c),31,0,0) #define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) #define ppc_addis(c,D,A,d) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) -#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v)); -#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v)); +#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v)) +#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v)) #define ppc_lwz(c,D,d,a) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_lhz(c,D,d,a) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_lbz(c,D,d,a) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) @@ -244,17 +250,18 @@ my and Ximian's copyright to this code. ;) #define ppc_bnectrp(c,BO,BI) ppc_bcctr(c,BO,BI) #define ppc_bnectrlp(c,BO,BI) ppc_bcctr(c,BO,BI) -#define ppc_bclrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (16 << 1) | LK) -#define ppc_bclr(c,BO,BI) ppc_bclrx(c,BO,BI,0) -#define ppc_bclrl(c,BO,BI) ppc_bclrx(c,BO,BI,1) +#define ppc_bclrx(c,BO,BI,BH,LK) ppc_emit32(c, (19 << 26) | ((BO) << 21 )| ((BI) << 16) | (0 << 13) | ((BH) << 11) | (16 << 1) | (LK)) +#define ppc_bclr(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,0) +#define ppc_bclrl(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,1) -#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI) -#define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI) +#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI,0) +#define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI,0) -#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (0x00000 << 1) | 0 ) +#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (0 << 1) | 0) #define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B)) -#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (B << 11) | (32 << 1) | 0 ) +#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (32 << 1) | 0) #define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B)) +#define ppc_cmpw(c,cfrD,A,B) ppc_cmp(c, (cfrD), 0, (A), (B)) #define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc) #define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0) @@ -540,9 +547,18 @@ my and Ximian's copyright to this code. 
;) #define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0) #define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1) -#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc) +#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | ((S) << 21) | ((A) << 16) | ((SH) << 11) | ((MB) << 6) | ((ME) << 1) | (Rc)) #define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0) #define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1) +#define ppc_extlwi(c,A,S,n,b) ppc_rlwinm(c,A,S, b, 0, (n) - 1) +#define ppc_extrwi(c,A,S,n,b) ppc_rlwinm(c,A,S, (b) + (n), 32 - (n), 31) +#define ppc_rotlwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31) +#define ppc_rotrwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), 0, 31) +#define ppc_slwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31 - (n)) +#define ppc_srwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), n, 31) +#define ppc_clrlwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, n, 31) +#define ppc_clrrwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, 0, 31 - (n)) +#define ppc_clrlslwi(c,A,S,b,n) ppc_rlwinm(c,A,S, n, (b) - (n), 31 - (n)) #define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc) #define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0) -- cgit v1.1 From 406790f1df77c80b5b28bcac561e7b6c6cd1a3a6 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Tue, 18 Nov 2008 10:25:11 +0000 Subject: 2008-11-18 Mark Probst * ppc/ppc-codegen.h: PPC64 code generation macros. Based on code submitted by andreas.faerber@web.de at https://bugzilla.novell.com/show_bug.cgi?id=324134 under the X11/MIT license. svn path=/trunk/mono/; revision=119141 --- ChangeLog | 8 +++ ppc/ppc-codegen.h | 162 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 166 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index 17f1339..396a717 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,13 @@ 2008-11-18 Mark Probst + * ppc/ppc-codegen.h: PPC64 code generation macros. + + Based on code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. + +2008-11-18 Mark Probst + * ppc/ppc-codegen.h: A few fixes and additions. 
Based on code submitted by andreas.faerber@web.de at diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 087249f..c157d6c 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -128,14 +128,39 @@ enum { #define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) #define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) -#define ppc_load(c,D,v) do { \ +#define ppc_load32(c,D,v) G_STMT_START { \ + ppc_lis ((c), (D), (guint32)(v) >> 16); \ + ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ + } G_STMT_END + +#ifndef __mono_ppc64__ + +#define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v)) + +#define ppc_load(c,D,v) G_STMT_START { \ if (ppc_is_imm16 ((guint32)(v))) { \ ppc_li ((c), (D), (guint16)(guint32)(v)); \ } else { \ - ppc_lis ((c), (D), (guint32)(v) >> 16); \ - ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ + ppc_load32 ((c), (D), (guint32)(v)); \ } \ - } while (0) + } G_STMT_END + +#define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) +#define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) +#define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) + +#define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) +#define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) +#define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) +#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) + +#endif + +#define ppc_opcode(c) ((c) >> 26) +#define ppc_split_5_1_1(x) (((x) >> 5) & 0x1) +#define ppc_split_5_1_5(x) ((x) & 0x1F) +#define ppc_split_5_1(x) ((ppc_split_5_1_5(x) << 1) | ppc_split_5_1_1(x)) #define ppc_break(c) ppc_tw((c),31,0,0) #define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) @@ -658,4 +683,133 @@ my and Ximian's copyright to this code. 
;) /* this marks the end of my work, ct */ +/* PPC64 */ + +#ifdef __mono_ppc64__ + +#define ppc_load_sequence(c,D,v) G_STMT_START { \ + ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \ + ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \ + ppc_sldi ((c), (D), (D), 32); \ + ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \ + ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \ + } G_STMT_END + +#define ppc_is_imm32(val) ((glong)(val) >= (glong)-(1L<<31) && (glong)(val) <= (glong)((1L<<31)-1)) + +#define ppc_load(c,D,v) G_STMT_START { \ + if (ppc_is_imm16 ((gulong)(v))) { \ + ppc_li ((c), (D), (guint16)(guint64)(v)); \ + } else if (ppc_is_imm32 ((gulong)(v))) { \ + ppc_load32 ((c), (D), (guint32)(guint64)(v)); \ + } else { \ + ppc_load_sequence ((c), (D), (guint64)(v)); \ + } \ + } G_STMT_END + +#define ppc_load_func(c,D,v) G_STMT_START { \ + ppc_load_sequence ((c), ppc_r11, (guint64)(v)); \ + ppc_ld ((c), ppc_r2, 8, ppc_r11); \ + ppc_ld ((c), (D), 0, ppc_r11); \ + } G_STMT_END + +#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d) >> 2, (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d) >> 2, (A)) +#define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) +#define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) + +#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d) >> 2, (A)) +#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d) >> 2, (A)) +#define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) +#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) + +#define ppc_extswx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (0 << 11) | (986 << 1) | (Rc)) +#define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0) +#define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1) + +#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc)) +#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0) +#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1) + +#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc)) +#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0) +#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1) + +#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 0) +#define ppc_ldarx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (84 << 1) | 0) +#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 1) +#define ppc_ldux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (53 << 1) | 0) +#define ppc_ldx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (21 << 1) | 0) + +#define ppc_mulhdx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (73 << 1) | (Rc)) +#define ppc_mulhd(c,D,A,B) ppc_mulhdx(c,D,A,B,0) +#define ppc_mulhdd(c,D,A,B) ppc_mulhdx(c,D,A,B,1) +#define ppc_mulhdux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (9 << 1) | (Rc)) +#define ppc_mulhdu(c,D,A,B) ppc_mulhdux(c,D,A,B,0) +#define ppc_mulhdud(c,D,A,B) ppc_mulhdux(c,D,A,B,1) + +#define ppc_mulldx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (233 << 1) | (Rc)) +#define ppc_mulld(c,D,A,B) ppc_mulldx(c,D,A,B,0,0) +#define ppc_mulldd(c,D,A,B) ppc_mulldx(c,D,A,B,0,1) +#define ppc_mulldo(c,D,A,B) ppc_mulldx(c,D,A,B,1,0) +#define 
ppc_mulldod(c,D,A,B) ppc_mulldx(c,D,A,B,1,1) + +#define ppc_rldclx(c,A,S,B,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(MB) << 5) | (8 << 1) | (Rc)) +#define ppc_rldcl(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,0) +#define ppc_rldcld(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,1) +#define ppc_rotld(c,A,S,B) ppc_rldcl(c, A, S, B, 0) + +#define ppc_rldcrx(c,A,S,B,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(ME) << 5) | (9 << 1) | (Rc)) +#define ppc_rldcr(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,0) +#define ppc_rldcrd(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,1) + +#define ppc_rldicx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (2 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc)) +#define ppc_rldic(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,0) +#define ppc_rldicd(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,1) + +#define ppc_rldiclx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (0 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc)) +#define ppc_rldicl(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,0) +#define ppc_rldicld(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,1) +#define ppc_extrdi(c,A,S,n,b) ppc_rldicl(c,A,S, (b) + (n), 64 - (n)) +#define ppc_rotldi(c,A,S,n) ppc_rldicl(c,A,S, n, 0) +#define ppc_rotrdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), 0) +#define ppc_srdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), n) +#define ppc_clrldi(c,A,S,n) ppc_rldicl(c,A,S, 0, n) + +#define ppc_rldicrx(c,A,S,SH,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(ME) << 5) | (1 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc)) +#define ppc_rldicr(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,0) +#define ppc_rldicrd(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,1) +#define ppc_extldi(c,A,S,n,b) ppc_rldicr(c, A, S, b, (n) - 1) +#define ppc_sldi(c,A,S,n) ppc_rldicr(c, A, S, n, 63 - (n)) +#define ppc_clrrdi(c,A,S,n) ppc_rldicr(c, A, S, 0, 63 - (n)) + +#define ppc_rldimix(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (3 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc)) +#define ppc_rldimi(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,0) +#define ppc_rldimid(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,1) + +#define ppc_slbia(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (498 << 1) | 0) +#define ppc_slbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | ((B) << 11) | (434 << 1) | 0) +#define ppc_sldx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (27 << 1) | (Rc)) +#define ppc_sld(c,A,S,B) ppc_sldx(c,S,A,B,0) +#define ppc_sldd(c,A,S,B) ppc_sldx(c,S,A,B,1) + +#define ppc_sradx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (794 << 1) | (Rc)) +#define ppc_srad(c,A,S,B) ppc_sradx(c,S,A,B,0) +#define ppc_sradd(c,A,S,B) ppc_sradx(c,S,A,B,1) +#define ppc_sradix(c,S,A,SH,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (((SH) & 31) << 11) | (413 << 2) | (((SH) >> 5) << 1) | (Rc)) +#define ppc_sradi(c,A,S,SH) ppc_sradix(c,S,A,SH,0) +#define ppc_sradid(c,A,S,SH) ppc_sradix(c,S,A,SH,1) + +#define ppc_srdx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (539 << 1) | (Rc)) +#define ppc_srd(c,A,S,B) ppc_srdx(c,S,A,B,0) +#define ppc_srdd(c,A,S,B) ppc_srdx(c,S,A,B,1) + +#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 
16) | (guint16)((ds) << 2) | 0) +#define ppc_stdcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (214 << 1) | 1) +#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 1) +#define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0) +#define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0) + +#endif + #endif -- cgit v1.1 From dbebfad82832bf895561902dd527d2e4c158c2c9 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Tue, 18 Nov 2008 15:32:41 +0000 Subject: 2008-11-18 Mark Probst * ppc/ppc-codegen.h: Macro for nop added. 2008-11-18 Mark Probst * mini-ppc64.c, mini-ppc64.h, tramp-ppc64.c, cpu-ppc64.md: Changes for PPC64. An empty program runs now. svn path=/trunk/mono/; revision=119162 --- ChangeLog | 4 ++++ ppc/ppc-codegen.h | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 396a717..d817788 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,9 @@ 2008-11-18 Mark Probst + * ppc/ppc-codegen.h: Macro for nop added. + +2008-11-18 Mark Probst + * ppc/ppc-codegen.h: PPC64 code generation macros. Based on code submitted by andreas.faerber@web.de at diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index c157d6c..d25124b 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -175,8 +175,9 @@ enum { #define ppc_stb(c,S,d,a) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_stwu(c,s,d,a) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) #define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) -#define ppc_ori(c,S,A,u) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(u)) #define ppc_mr(c,a,s) ppc_or (c, a, s, s) +#define ppc_ori(c,S,A,u) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(u)) +#define ppc_nop(c) ppc_ori (c, 0, 0, 0) #define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1)) #define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr) #define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1)) -- cgit v1.1 From b31b375fc1354cc835d183e7e251e602eeb038c5 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 18 Nov 2008 21:56:49 +0000 Subject: 2008-11-17 Rodrigo Kumpera * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ. * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg macro. svn path=/trunk/mono/; revision=119227 --- ChangeLog | 7 +++++++ x86/x86-codegen.h | 11 +++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index d817788..2fc3e4f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -20,6 +20,13 @@ 2008-11-17 Rodrigo Kumpera + * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ. + + * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg + macro. + +2008-11-17 Rodrigo Kumpera + * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros. 
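The ppc_nop macro added above expands to ori 0,0,0, the architected PowerPC no-op. A quick standalone check of the resulting instruction word, following the ppc_ori field layout shown in the same hunk:

#include <stdio.h>
#include <stdint.h>

int main (void)
{
    int S = 0, A = 0, ui = 0;
    /* ppc_ori: (24 << 26) | (S << 21) | (A << 16) | (guint16)(ui) */
    uint32_t nop = (24u << 26) | (S << 21) | (A << 16) | (uint16_t) ui;
    printf ("ori 0,0,0 -> 0x%08x\n", (unsigned) nop); /* 0x60000000, the canonical nop */
    return 0;
}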
diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 7ca0cac..1b0d7c4 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1774,7 +1774,7 @@ typedef enum { X86_SSE_PCMPGTB = 0x64, X86_SSE_PCMPGTW = 0x65, X86_SSE_PCMPGTD = 0x66, - X86_SSE_PCMPGTQ = 0x37, /*sse41*/ + X86_SSE_PCMPGTQ = 0x37, /*sse42*/ X86_SSE_PSADBW = 0xf6, @@ -1834,7 +1834,8 @@ typedef enum { X86_SSE_PREFETCH = 0x18, X86_SSE_MOVNTPS = 0x2B, - X86_SSE_MOVHPD_MEMBASE_REG = 0x17 + X86_SSE_MOVHPD_MEMBASE_REG = 0x17, + X86_SSE_MOVSD_MEMBASE_REG = 0x11, } X86_SSE_Opcode; @@ -1893,6 +1894,12 @@ typedef enum { x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_sd_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF2; \ + x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ + } while (0) + #define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0xF3; \ -- cgit v1.1 From 5c317c4676f911a0620b54e6668cf66a5c0dda31 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 18 Nov 2008 21:56:58 +0000 Subject: 2008-11-18 Rodrigo Kumpera * x86/x86-codegen.h: Add PINSR B/W/D. svn path=/trunk/mono/; revision=119229 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog b/ChangeLog index 2fc3e4f..4160bbd 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-11-18 Rodrigo Kumpera + + * x86/x86-codegen.h: Add PINSR B/W/D. + 2008-11-18 Mark Probst * ppc/ppc-codegen.h: Macro for nop added. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 1b0d7c4..984afd6 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1836,6 +1836,10 @@ typedef enum { X86_SSE_MOVNTPS = 0x2B, X86_SSE_MOVHPD_MEMBASE_REG = 0x17, X86_SSE_MOVSD_MEMBASE_REG = 0x11, + + X86_SSE_PINSRB = 0x20,/*sse41*/ + X86_SSE_PINSRW = 0xC4, + X86_SSE_PINSRD = 0x22,/*sse41*/ } X86_SSE_Opcode; -- cgit v1.1 From 3225dc9308230de9fbbca884c05e6b150a8e0333 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 20 Nov 2008 14:12:04 +0000 Subject: 2008-11-20 Rodrigo Kumpera * x86/x86-codegen.h: Add PEXTR B/W/D. svn path=/trunk/mono/; revision=119441 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4160bbd..0b78362 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-11-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add PEXTR B/W/D. + 2008-11-18 Rodrigo Kumpera * x86/x86-codegen.h: Add PINSR B/W/D. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 984afd6..38270a6 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1840,6 +1840,10 @@ typedef enum { X86_SSE_PINSRB = 0x20,/*sse41*/ X86_SSE_PINSRW = 0xC4, X86_SSE_PINSRD = 0x22,/*sse41*/ + + X86_SSE_PEXTRB = 0x14,/*sse41*/ + X86_SSE_PEXTRW = 0xC5, + X86_SSE_PEXTRD = 0x16,/*sse41*/ } X86_SSE_Opcode; -- cgit v1.1 From daa4af175e0f8b95888918dbf429c7d5f66d3c07 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 20 Nov 2008 14:28:51 +0000 Subject: 2008-11-20 Zoltan Varga * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only used by the interpreter. svn path=/trunk/mono/; revision=119444 --- ChangeLog | 5 +++++ arm/Makefile.am | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 0b78362..f9c5dc8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-11-20 Zoltan Varga + + * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only + used by the interpreter. 
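The new x86_sse_alu_sd_membase_reg macro only prepends the scalar-double prefix 0xF2, so with X86_SSE_MOVSD_MEMBASE_REG (0x11) the composition yields a MOVSD store. A hedged byte-level sketch of the emitted sequence for movsd [ebp-8], xmm0, assuming the underlying x86_sse_alu_membase_reg writes the 0x0F escape, the opcode byte and a ModRM (as the 0x66-prefixed pd variant above suggests); the ModRM and displacement are written out by hand here:

#include <stdio.h>

int main (void)
{
    /* F2 0F 11 /r : MOVSD m64, xmm */
    unsigned char code [5], *p = code;
    *p++ = 0xF2;    /* scalar-double prefix, added by x86_sse_alu_sd_membase_reg */
    *p++ = 0x0F;    /* two-byte opcode escape */
    *p++ = 0x11;    /* X86_SSE_MOVSD_MEMBASE_REG */
    *p++ = 0x45;    /* ModRM: mod=01 (disp8), reg=xmm0, rm=ebp */
    *p++ = 0xF8;    /* disp8 = -8 */
    for (unsigned char *q = code; q < p; ++q)
        printf ("%02x ", *q);
    printf ("\n");
    return 0;
}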
+ 2008-11-20 Rodrigo Kumpera * x86/x86-codegen.h: Add PEXTR B/W/D. diff --git a/arm/Makefile.am b/arm/Makefile.am index ba7a60d..180be53 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -7,7 +7,6 @@ BUILT_SOURCES = arm_dpimacros.h arm_fpamacros.h arm_vfpmacros.h libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ - tramp.c \ arm-codegen.c \ arm-codegen.h \ arm-dis.c \ -- cgit v1.1 From 14651d4fa6b039131000aa5157ed99b7526f89b8 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Thu, 20 Nov 2008 21:27:36 +0000 Subject: 2008-11-20 Mark Probst * ppc/ppc-codegen.h: 64 bit division opcodes. Code submitted by andreas.faerber@web.de at https://bugzilla.novell.com/show_bug.cgi?id=324134 under the X11/MIT license. svn path=/trunk/mono/; revision=119515 --- ChangeLog | 8 ++++++++ ppc/ppc-codegen.h | 12 ++++++++++++ 2 files changed, 20 insertions(+) diff --git a/ChangeLog b/ChangeLog index f9c5dc8..4fbaf76 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2008-11-20 Mark Probst + + * ppc/ppc-codegen.h: 64 bit division opcodes. + + Code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. + 2008-11-20 Zoltan Varga * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index d25124b..e96a869 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -724,6 +724,18 @@ my and Ximian's copyright to this code. ;) #define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) +#define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc)) +#define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0) +#define ppc_divdd(c,D,A,B) ppc_divdx(c,D,A,B,0,1) +#define ppc_divdo(c,D,A,B) ppc_divdx(c,D,A,B,1,0) +#define ppc_divdod(c,D,A,B) ppc_divdx(c,D,A,B,1,1) + +#define ppc_divdux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (457 << 1) | (Rc)) +#define ppc_divdu(c,D,A,B) ppc_divdux(c,D,A,B,0,0) +#define ppc_divdud(c,D,A,B) ppc_divdux(c,D,A,B,0,1) +#define ppc_divduo(c,D,A,B) ppc_divdux(c,D,A,B,1,0) +#define ppc_divduod(c,D,A,B) ppc_divdux(c,D,A,B,1,1) + #define ppc_extswx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (0 << 11) | (986 << 1) | (Rc)) #define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0) #define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1) -- cgit v1.1 From 96ed3f7c4ea51c61ec3b5d0600c32fa003b8e4f7 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Thu, 20 Nov 2008 21:36:13 +0000 Subject: 2008-11-20 Mark Probst * decompose.c: Decompose carry and overflow add on PPC64 like on other 64 bit archs. Don't decompose sub at all on PPC64. * mini-ppc64.c, exceptions-ppc64.c, tramp-ppc64.c, cpu-ppc64.md: Several fixes and new opcodes. Now PPC64 runs (but doesn't pass) basic-long.exe. 2008-11-20 Mark Probst * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in ppc_load_func to fix the 2 bit shift. svn path=/trunk/mono/; revision=119516 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4fbaf76..ed5e7d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,10 @@ 2008-11-20 Mark Probst + * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in + ppc_load_func to fix the 2 bit shift. + +2008-11-20 Mark Probst + * ppc/ppc-codegen.h: 64 bit division opcodes. 
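ppc_divdx above packs an X-form instruction with primary opcode 31 and extended opcode 489 (457 in the unsigned ppc_divdux). A standalone sketch reproducing the macro arithmetic for divd r3,r4,r5; the helper simply mirrors the macro body:

#include <stdio.h>
#include <stdint.h>

static uint32_t divdx (int D, int A, int B, int OE, int Rc)
{
    /* Mirrors ppc_divdx: primary opcode 31, extended opcode 489. */
    return (31u << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (489 << 1) | Rc;
}

int main (void)
{
    printf ("divd 3,4,5 -> 0x%08x\n", (unsigned) divdx (3, 4, 5, 0, 0)); /* 0x7c642bd2 */
    return 0;
}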
Code submitted by andreas.faerber@web.de at diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index e96a869..4b34c46 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -710,8 +710,8 @@ my and Ximian's copyright to this code. ;) #define ppc_load_func(c,D,v) G_STMT_START { \ ppc_load_sequence ((c), ppc_r11, (guint64)(v)); \ - ppc_ld ((c), ppc_r2, 8, ppc_r11); \ - ppc_ld ((c), (D), 0, ppc_r11); \ + ppc_load_reg ((c), ppc_r2, 8, ppc_r11); \ + ppc_load_reg ((c), (D), 0, ppc_r11); \ } G_STMT_END #define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d) >> 2, (A)) -- cgit v1.1 From 01e12b57e8773f9c65c64a91f956b0fa9335d095 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 20 Nov 2008 23:44:44 +0000 Subject: 2008-11-20 Rodrigo Kumpera * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants. * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm. * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently. svn path=/trunk/mono/; revision=119545 --- ChangeLog | 8 ++++++++ x86/x86-codegen.h | 57 ++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/ChangeLog b/ChangeLog index ed5e7d0..928cfa7 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2008-11-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants. + + * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm. + + * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently. + 2008-11-20 Mark Probst * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 38270a6..3ceb548 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1837,6 +1837,9 @@ typedef enum { X86_SSE_MOVHPD_MEMBASE_REG = 0x17, X86_SSE_MOVSD_MEMBASE_REG = 0x11, + X86_SSE_MOVS_REG_MEMBASE = 0x10, + X86_SSE_MOVS_MEMBASE_REG = 0x11, + X86_SSE_PINSRB = 0x20,/*sse41*/ X86_SSE_PINSRW = 0xC4, X86_SSE_PINSRD = 0x22,/*sse41*/ @@ -1885,17 +1888,50 @@ typedef enum { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) + #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0x66; \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ + } while (0) + +#define x86_sse_alu_pd_reg_membase(inst,opc,dreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \ + } while (0) + +#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \ + do { \ + x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \ + *(inst)++ = (unsigned char)(imm); \ + } while (0) + +#define x86_sse_alu_pd_reg_membase_imm(inst,opc,dreg,basereg,disp,imm) \ + do { \ + x86_sse_alu_pd_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \ + *(inst)++ = (unsigned char)(imm); \ + } while (0) + + #define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \ do { \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) +#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \ + do { \ + x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ + *(inst)++ = (unsigned char)imm; \ + } while (0) + + #define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0xF2; \ @@ -1908,23 +1944,20 @@ typedef enum { x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ } while (0) + #define 
x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ do { \ *(inst)++ = (unsigned char)0xF3; \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) -#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \ - do { \ - x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ - *(inst)++ = (unsigned char)imm; \ +#define x86_sse_alu_ss_membase_reg(inst,opc,basereg,disp,reg) \ + do { \ + *(inst)++ = (unsigned char)0xF3; \ + x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ } while (0) -#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \ - do { \ - x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \ - *(inst)++ = (unsigned char)(imm); \ - } while (0) + #define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ do { \ @@ -1935,12 +1968,6 @@ typedef enum { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) -#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \ - do { \ - *(inst)++ = (unsigned char)0x66; \ - x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ - } while (0) - #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ do { \ *(inst)++ = (unsigned char)0x0f; \ -- cgit v1.1 From dc227de13e4f1cee33c379401adbb90a225e680a Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 20 Nov 2008 23:45:00 +0000 Subject: 2008-11-20 Rodrigo Kumpera * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD. svn path=/trunk/mono/; revision=119549 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 6 +++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 928cfa7..ab8dbc8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,9 @@ 2008-11-20 Rodrigo Kumpera + * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD. + +2008-11-20 Rodrigo Kumpera + * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants. * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm. diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 3ceb548..966c53d 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1834,11 +1834,11 @@ typedef enum { X86_SSE_PREFETCH = 0x18, X86_SSE_MOVNTPS = 0x2B, + X86_SSE_MOVHPD_REG_MEMBASE = 0x16, X86_SSE_MOVHPD_MEMBASE_REG = 0x17, - X86_SSE_MOVSD_MEMBASE_REG = 0x11, - X86_SSE_MOVS_REG_MEMBASE = 0x10, - X86_SSE_MOVS_MEMBASE_REG = 0x11, + X86_SSE_MOVSD_REG_MEMBASE = 0x10, + X86_SSE_MOVSD_MEMBASE_REG = 0x11, X86_SSE_PINSRB = 0x20,/*sse41*/ X86_SSE_PINSRW = 0xC4, -- cgit v1.1 From b45b096d6d4246f16d05e42838122f1d58f875f6 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Fri, 21 Nov 2008 00:21:53 +0000 Subject: 2008-11-21 Mark Probst * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: Several fixes. Now PPC64 passes basic-long.exe. 2008-11-21 Mark Probst * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit values. svn path=/trunk/mono/; revision=119560 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index ab8dbc8..7953c0c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-11-21 Mark Probst + + * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit + values. + 2008-11-20 Rodrigo Kumpera * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD. 
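After this reshuffle the x86_sse_alu_* families differ only in the mandatory prefix: none for packed single (ps), 0x66 for packed double (pd), 0xF3 for scalar single (ss) and 0xF2 for scalar double (sd). A hedged sketch of that pattern using opcode 0x58, the SSE add opcode in each family; the corresponding X86_SSE_* constant does not appear in this excerpt, so treat the value as an assumption:

#include <stdio.h>

/* Emit prefix (if any) + 0x0F escape + opcode + ModRM for the register form. */
static unsigned char *sse_op (unsigned char *p, int prefix, unsigned char opc)
{
    if (prefix)
        *p++ = (unsigned char) prefix;
    *p++ = 0x0F;
    *p++ = opc;
    *p++ = 0xCA;    /* ModRM: mod=11, reg=xmm1, rm=xmm2 */
    return p;
}

int main (void)
{
    unsigned char buf [16], *p = buf;
    p = sse_op (p, 0x00, 0x58);    /* addps xmm1, xmm2 */
    p = sse_op (p, 0x66, 0x58);    /* addpd xmm1, xmm2 */
    p = sse_op (p, 0xF3, 0x58);    /* addss xmm1, xmm2 */
    p = sse_op (p, 0xF2, 0x58);    /* addsd xmm1, xmm2 */
    for (unsigned char *q = buf; q < p; ++q)
        printf ("%02x ", *q);
    printf ("\n");
    return 0;
}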
diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 4b34c46..6b4c8c5 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -125,8 +125,8 @@ enum { #define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0) -#define ppc_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1)) -#define ppc_is_uimm16(val) ((gint)(val) >= 0 && (gint)(val) <= 65535) +#define ppc_is_imm16(val) ((glong)(val) >= (glong)-(1L<<15) && (glong)(val) <= (glong)((1L<<15)-1L)) +#define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L) #define ppc_load32(c,D,v) G_STMT_START { \ ppc_lis ((c), (D), (guint32)(v) >> 16); \ -- cgit v1.1 From 742361c7bfc21faf8485d20d00cdfc58c04800f9 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Fri, 28 Nov 2008 19:06:34 +0000 Subject: 2008-11-28 Mark Probst * mini-ppc64.c, mini-ppc64.h: Enable generalized IMT thunks and make them work. 2008-11-28 Mark Probst * object.c: Don't put function descriptors into generalized IMT thunks. 2008-11-28 Mark Probst * ppc/ppc-codegen.h: #define for the maximum length of a load sequence. svn path=/trunk/mono/; revision=120248 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/ChangeLog b/ChangeLog index 7953c0c..3edbc2c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-11-28 Mark Probst + + * ppc/ppc-codegen.h: #define for the maximum length of a load + sequence. + 2008-11-21 Mark Probst * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 6b4c8c5..a0241ac 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -137,6 +137,8 @@ enum { #define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v)) +#define PPC_LOAD_SEQUENCE_LENGTH 8 + #define ppc_load(c,D,v) G_STMT_START { \ if (ppc_is_imm16 ((guint32)(v))) { \ ppc_li ((c), (D), (guint16)(guint32)(v)); \ @@ -696,6 +698,8 @@ my and Ximian's copyright to this code. ;) ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \ } G_STMT_END +#define PPC_LOAD_SEQUENCE_LENGTH 20 + #define ppc_is_imm32(val) ((glong)(val) >= (glong)-(1L<<31) && (glong)(val) <= (glong)((1L<<31)-1)) #define ppc_load(c,D,v) G_STMT_START { \ -- cgit v1.1 From 7f226f68fb98684dafd132d90ca1a24635c33557 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Tue, 2 Dec 2008 16:03:45 +0000 Subject: 2008-12-02 Mark Probst * tramp-ppc64.c (mono_arch_create_rgctx_lazy_fetch_trampoline): Fix trampoline size. * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: A few floating point conversion opcodes are implemented natively instead via emulation. 2008-12-02 Mark Probst * ppc/ppc-codegen.h: Opcodes for floating point conversions from 64 bit integers. Code submitted by andreas.faerber@web.de at https://bugzilla.novell.com/show_bug.cgi?id=324134 under the X11/MIT license. svn path=/trunk/mono/; revision=120492 --- ChangeLog | 9 +++++++++ ppc/ppc-codegen.h | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/ChangeLog b/ChangeLog index 3edbc2c..4aad234 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,12 @@ +2008-12-02 Mark Probst + + * ppc/ppc-codegen.h: Opcodes for floating point conversions from + 64 bit integers. + + Code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. 
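The widened ppc_is_imm16/ppc_is_uimm16 predicates from the 2008-11-21 change matter on PPC64, where the old gint casts truncated 64-bit values before the range test. A minimal standalone check of the new definitions, with glong modelled as long:

#include <stdio.h>

#define is_imm16(val)  ((long)(val) >= (long)-(1L<<15) && (long)(val) <= (long)((1L<<15)-1L))
#define is_uimm16(val) ((long)(val) >= 0L && (long)(val) <= 65535L)

int main (void)
{
    printf ("%d %d %d\n", is_imm16 (0x7fff), is_imm16 (-0x8000), is_imm16 (0x8000)); /* 1 1 0 */
    printf ("%d %d\n", is_uimm16 (65535), is_uimm16 (65536));                        /* 1 0 */
    return 0;
}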
+ 2008-11-28 Mark Probst * ppc/ppc-codegen.h: #define for the maximum length of a load diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index a0241ac..51cedb6 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -744,6 +744,10 @@ my and Ximian's copyright to this code. ;) #define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0) #define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1) +#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc)) +#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0) +#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1) + #define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc)) #define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0) #define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1) -- cgit v1.1 From dd397c9fd311f0411694ff1cc7904aec14f4551b Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Fri, 5 Dec 2008 16:42:24 +0000 Subject: 2008-12-05 Mark Probst * tramp-ppc.c, mini-ppc.c, mini-ppc.h: Merged tramp-ppc.c with tramp-ppc64.c. * Makefile.am: Use tramp-ppc.c instead of tramp-ppc64.c. * tramp-ppc64.c: Removed. 2008-12-05 Mark Probst * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added ppc_load/store_multiple_regs and ppc_compare_reg_imm. svn path=/trunk/mono/; revision=120852 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4aad234..515e1ca 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-12-05 Mark Probst + + * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added + ppc_load/store_multiple_regs and ppc_compare_reg_imm. + 2008-12-02 Mark Probst * ppc/ppc-codegen.h: Opcodes for floating point conversions from diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 51cedb6..f00554c 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -147,15 +147,21 @@ enum { } \ } G_STMT_END +#define ppc_load_func(c,D,V) ppc_load ((c), (D), (V)) + #define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) #define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) #define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) #define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) +#define ppc_load_multiple_regs(c,D,A,d) ppc_lmw ((c), (D), (A), (d)) #define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) #define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) #define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) +#define ppc_store_multiple_regs(c,S,A,D) ppc_stmw ((c), (S), (A), (D)) + +#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B)) #endif @@ -722,11 +728,27 @@ my and Ximian's copyright to this code. 
;) #define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d) >> 2, (A)) #define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) #define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) +#define ppc_load_multiple_regs(c,D,A,d) G_STMT_START { \ + int __i, __o = (d); \ + for (__i = (D); __i <= 31; ++__i) { \ + ppc_load_reg ((c), __i, __o, (A)); \ + __o += sizeof (gulong); \ + } \ + } G_STMT_END #define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d) >> 2, (A)) #define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d) >> 2, (A)) #define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) +#define ppc_store_multiple_regs(c,S,A,D) G_STMT_START { \ + int __i, __o = (D); \ + for (__i = (S); __i <= 31; ++__i) { \ + ppc_store_reg ((c), __i, __o, (A)); \ + __o += sizeof (gulong); \ + } \ + } G_STMT_END + +#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 1, (A), (B)) #define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc)) #define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0) -- cgit v1.1 From 77eff8936b5e423be2712ba66cd8baba0edd2795 Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Fri, 5 Dec 2008 20:57:02 +0000 Subject: 2008-12-05 Mark Probst * mini-ppc.c: Some simple merges from mini-ppc64.c. 2008-12-05 Mark Probst * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence. Added ppc_compare_log. svn path=/trunk/mono/; revision=120890 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 515e1ca..68f8b0d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,10 @@ 2008-12-05 Mark Probst + * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence. + Added ppc_compare_log. + +2008-12-05 Mark Probst + * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added ppc_load/store_multiple_regs and ppc_compare_reg_imm. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index f00554c..78050c9 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -147,7 +147,7 @@ enum { } \ } G_STMT_END -#define ppc_load_func(c,D,V) ppc_load ((c), (D), (V)) +#define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V)) #define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) #define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) @@ -162,6 +162,7 @@ enum { #define ppc_store_multiple_regs(c,S,A,D) ppc_stmw ((c), (S), (A), (D)) #define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B)) +#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 0, (A), (B)) #endif @@ -749,6 +750,7 @@ my and Ximian's copyright to this code. ;) } G_STMT_END #define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 1, (A), (B)) +#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 1, (A), (B)) #define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc)) #define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0) -- cgit v1.1 From 2dcc1868b2e2e830a9fa84a445ee79a8f6ab38ba Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Wed, 10 Dec 2008 09:33:57 +0000 Subject: 2008-12-10 Mark Probst * mini-ppc.c: Merged with mini-ppc64.c. * mini-ppc.h: Define PPC_MINIMAL_PARAM_AREA_SIZE on all targets. * Makefile.am: Use the same sources for PPC and PPC64. * mini-ppc64.c: Removed. 2008-12-10 Mark Probst * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64 merge. 
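ppc_load_func deliberately goes through the full fixed-length ppc_load_sequence rather than the shortest ppc_load form: a call site emitted this way can later be patched with a different target address without changing the code size, which is what PPC_LOAD_SEQUENCE_LENGTH documents. On PPC64 the loaded value is the address of a function descriptor, hence the extra loads of the TOC register r2 from offset 8 and of the entry point from offset 0. A sketch of the descriptor layout this assumes (ELFv1-style ABI; the struct and field names are illustrative, not from the header):

#include <stdio.h>

/* PPC64 function descriptor as assumed by ppc_load_func. */
struct ppc64_func_desc {
    void *entry;    /* offset 0: fetched into the destination register */
    void *toc;      /* offset 8: fetched into ppc_r2 */
    void *env;      /* environment pointer, unused by the macro */
};

int main (void)
{
    struct ppc64_func_desc d = { (void *) 0x1000, (void *) 0x2000, 0 };
    printf ("entry=%p toc=%p\n", d.entry, d.toc);
    return 0;
}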
svn path=/trunk/mono/; revision=121203 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/ChangeLog b/ChangeLog index 68f8b0d..073117e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-12-10 Mark Probst + + * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64 + merge. + 2008-12-05 Mark Probst * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 78050c9..c44fa79 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -161,9 +161,20 @@ enum { #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) #define ppc_store_multiple_regs(c,S,A,D) ppc_stmw ((c), (S), (A), (D)) +#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B)) #define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B)) #define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 0, (A), (B)) +#define ppc_shift_left(c,A,S,B) ppc_slw((c), (S), (A), (B)) +#define ppc_shift_left_imm(c,A,S,n) ppc_slwi((c), (A), (S), (n)) + +#define ppc_shift_right_imm(c,A,S,B) ppc_srwi((c), (A), (S), (B)) +#define ppc_shift_right_arith_imm(c,A,S,B) ppc_srawi((c), (A), (S), (B)) + +#define ppc_multiply(c,D,A,B) ppc_mullw((c), (D), (A), (B)) + +#define ppc_clear_right_imm(c,A,S,n) ppc_clrrwi((c), (A), (S), (n)) + #endif #define ppc_opcode(c) ((c) >> 26) @@ -749,9 +760,20 @@ my and Ximian's copyright to this code. ;) } \ } G_STMT_END +#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 1, (A), (B)) #define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 1, (A), (B)) #define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 1, (A), (B)) +#define ppc_shift_left(c,A,S,B) ppc_sld((c), (A), (S), (B)) +#define ppc_shift_left_imm(c,A,S,n) ppc_sldi((c), (A), (S), (n)) + +#define ppc_shift_right_imm(c,A,S,B) ppc_srdi((c), (A), (S), (B)) +#define ppc_shift_right_arith_imm(c,A,S,B) ppc_sradi((c), (A), (S), (B)) + +#define ppc_multiply(c,D,A,B) ppc_mulld((c), (D), (A), (B)) + +#define ppc_clear_right_imm(c,A,S,n) ppc_clrrdi((c), (A), (S), (n)) + #define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc)) #define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0) #define ppc_divdd(c,D,A,B) ppc_divdx(c,D,A,B,0,1) -- cgit v1.1 From 344a06253c9c1bad287e160b9714b0a052e68a09 Mon Sep 17 00:00:00 2001 From: Mark Mason Date: Sat, 13 Dec 2008 06:54:25 +0000 Subject: 2008-12-12 Mark Mason * mips/mips-codegen.h: Changes to support n32. Contributed under the MIT X11 license. svn path=/trunk/mono/; revision=121488 --- ChangeLog | 4 ++++ mips/mips-codegen.h | 38 +++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 073117e..6ca3f30 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2008-12-12 Mark Mason + + * mips/mips-codegen.h: Changes to support n32. 
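The width-neutral aliases added for the PPC/PPC64 merge all follow one pattern: the same macro name expands to the word form on 32 bit and the doubleword form on 64 bit. For the compares that is just the L field of cmp/cmpl, as this standalone sketch of the ppc_cmp field layout shows:

#include <stdio.h>
#include <stdint.h>

/* Mirrors ppc_cmp; L selects 32-bit (0) vs 64-bit (1) comparison,
 * which is exactly the bit the new ppc_compare alias hides. */
static uint32_t cmp (int cfrD, int L, int A, int B)
{
    return (31u << 26) | (cfrD << 23) | (L << 21) | (A << 16) | (B << 11);
}

int main (void)
{
    printf ("cmpw cr0,r3,r4 -> 0x%08x\n", (unsigned) cmp (0, 0, 3, 4)); /* 0x7c032000 */
    printf ("cmpd cr0,r3,r4 -> 0x%08x\n", (unsigned) cmp (0, 1, 3, 4)); /* 0x7c232000 */
    return 0;
}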
+ 2008-12-10 Mark Probst * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64 diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h index adb3200..e05bc38 100644 --- a/mips/mips-codegen.h +++ b/mips/mips-codegen.h @@ -16,6 +16,7 @@ enum { mips_a1, mips_a2, mips_a3, +#if _MIPS_SIM == _ABIO32 mips_t0, /* 8 temporaries */ mips_t1, mips_t2, @@ -24,6 +25,16 @@ enum { mips_t5, mips_t6, mips_t7, +#elif _MIPS_SIM == _ABIN32 + mips_a4, /* 4 more argument registers */ + mips_a5, + mips_a6, + mips_a7, + mips_t0, /* 4 temporaries */ + mips_t1, + mips_t2, + mips_t3, +#endif mips_s0, /* 16 calle saved */ mips_s1, mips_s2, @@ -148,6 +159,30 @@ enum { MIPS_FPU_NGT }; +#if SIZEOF_REGISTER == 4 + +#define MIPS_SW mips_sw +#define MIPS_LW mips_lw +#define MIPS_ADDU mips_addu +#define MIPS_ADDIU mips_addiu +#define MIPS_SWC1 mips_swc1 +#define MIPS_LWC1 mips_lwc1 +#define MIPS_MOVE mips_move + +#elif SIZEOF_REGISTER == 8 + +#define MIPS_SW mips_sd +#define MIPS_LW mips_ld +#define MIPS_ADDU mips_daddu +#define MIPS_ADDIU mips_daddiu +#define MIPS_SWC1 mips_sdc1 +#define MIPS_LWC1 mips_ldc1 +#define MIPS_MOVE mips_dmove + +#else +#error Unknown SIZEOF_REGISTER +#endif + #define mips_emit32(c,x) do { \ *((guint32 *) (void *)(c)) = x; \ (c) = (typeof(c))(((guint32 *)(void *)(c)) + 1); \ @@ -301,7 +336,8 @@ enum { #define mips_swr(c,src,base,offset) mips_format_i(c,54,base,src,offset) /* misc and coprocessor ops */ -#define mips_move(c,dest,src) mips_add(c,dest,src,mips_zero) +#define mips_move(c,dest,src) mips_addu(c,dest,src,mips_zero) +#define mips_dmove(c,dest,src) mips_daddu(c,dest,src,mips_zero) #define mips_nop(c) mips_sll(c,0,0,0) #define mips_break(c,code) mips_emit32(c, ((code)<<6)|13) #define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16) -- cgit v1.1 From 792160756d6ef76711408f151838c3f5a5f8d83b Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Fri, 19 Dec 2008 19:46:04 +0000 Subject: 2008-12-19 Mark Probst * ppc/ppc-codegen.h: Fixed the argument order for lwzu in ppc_load_reg_update. svn path=/trunk/mono/; revision=121883 --- ChangeLog | 5 +++++ ppc/ppc-codegen.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 6ca3f30..15852c4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2008-12-19 Mark Probst + + * ppc/ppc-codegen.h: Fixed the argument order for lwzu in + ppc_load_reg_update. + 2008-12-12 Mark Mason * mips/mips-codegen.h: Changes to support n32. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index c44fa79..f1552a4 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -150,7 +150,7 @@ enum { #define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V)) #define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (A), (d)) #define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) #define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) #define ppc_load_multiple_regs(c,D,A,d) ppc_lmw ((c), (D), (A), (d)) -- cgit v1.1 From f228d47d2afc549321cec800466e6bc1cde631bb Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 19 Jan 2009 19:47:54 +0000 Subject: 2009-01-19 Rodrigo Kumpera * x86/x86-codegen.h: Add x86_movd_xreg_membase. 
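Two details of the n32 change above deserve a note: the MIPS_* aliases pick the 32- or 64-bit instruction form from SIZEOF_REGISTER at compile time, and mips_move now expands to addu rather than add, because add traps on signed overflow and is therefore wrong for a plain register copy. A standalone sketch of the R-format arithmetic behind the fixed mips_move; the helper and the o32 register numbers are illustrative:

#include <stdio.h>
#include <stdint.h>

/* MIPS R-format word, as assembled by mips_format_r in the header. */
static uint32_t r_format (int op, int rs, int rt, int rd, int shamt, int funct)
{
    return (uint32_t) (op << 26 | rs << 21 | rt << 16 | rd << 11 | shamt << 6 | funct);
}

int main (void)
{
    /* move $t0,$t1 == addu $t0,$t1,$0; funct 0x21 (addu) never traps,
     * unlike funct 0x20 (add), which the old mips_move used. */
    printf ("move $t0,$t1 -> 0x%08x\n", (unsigned) r_format (0, 9, 0, 8, 0, 0x21)); /* 0x01204021 */
    return 0;
}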
svn path=/trunk/mono/; revision=123825 --- ChangeLog | 4 ++++ x86/x86-codegen.h | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/ChangeLog b/ChangeLog index 15852c4..f76c0b5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2009-01-19 Rodrigo Kumpera + + * x86/x86-codegen.h: Add x86_movd_xreg_membase. + 2008-12-19 Mark Probst * ppc/ppc-codegen.h: Fixed the argument order for lwzu in diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 966c53d..5bdb79a 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -2020,6 +2020,14 @@ typedef enum { x86_reg_emit ((inst), (dreg), (sreg)); \ } while (0) +#define x86_movd_xreg_membase(inst,sreg,basereg,disp) \ + do { \ + *(inst)++ = (unsigned char)0x66; \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x6e; \ + x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ + } while (0) + #define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \ do { \ *(inst)++ = (unsigned char)(high_words) ? 0xF3 : 0xF2; \ -- cgit v1.1 From c70f15fc12afeb73f19d4ff18cf11b7289d76c4f Mon Sep 17 00:00:00 2001 From: Mark Probst Date: Mon, 2 Feb 2009 23:32:58 +0000 Subject: 2009-02-02 Mark Probst Contributed under the terms of the MIT/X11 license by Steven Munroe . * ppc/ppc-codegen.h: Make operand order and case consistent (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs, ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz, ppc_stw, ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu, ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw, ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use "i" or "ui" instead of "d" for immediate operands to immediate arithmetic and logical instructions in macros ppc_addi, ppc_addis, ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd. [__mono_ppc64__]: Make operand order and case consistent (assembler order) for ppc_load_multiple_regs, ppc_store_multiple_regs. Simplify the DS form and make them consistent with D forms for ppc_load_reg, ppc_load_reg_update, ppc_store_reg, ppc_store_reg_update, ppc_ld, ppc_lwa, ppc_ldu, ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux. 2009-02-02 Mark Probst Contributed under the terms of the MIT/X11 license by Steven Munroe . * exceptions-ppc.c (restore_regs_from_context): Correct operand order (offset then base reg) for ppc_load_multiple_regs. (emit_save_saved_regs): Correct operand order for ppc_store_multiple_regs. (mono_arch_get_call_filter): Correct operand order for ppc_load_multiple_regs. * mini-ppc.c (emit_memcpy): Fix operand order for ppc_load_reg_update and ppc_store_reg_update. (mono_arch_output_basic_block): Correct operand order for ppc_lha. (mono_arch_emit_epilog): Correct operand order for ppc_load_multiple_regs. * tramp-ppc.c (mono_arch_create_trampoline_code): Correct operand order for ppc_store_multiple_regs and ppc_load_multiple_regs. svn path=/trunk/mono/; revision=125443 --- ChangeLog | 21 +++++++++++ ppc/ppc-codegen.h | 103 ++++++++++++++++++++++++++++++------------------------ 2 files changed, 79 insertions(+), 45 deletions(-) diff --git a/ChangeLog b/ChangeLog index f76c0b5..ceef7ff 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,24 @@ +2009-02-02 Mark Probst + + Contributed under the terms of the MIT/X11 license by Steven + Munroe .
+ + * ppc/ppc-codegen.h: Make operand order and case consistent + (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs, + ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz, + ppc_stw,ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu, + ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw, + ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use + "i" or "ui" instead of "d" for immediated operands to immediate + arthimetic and logical instructions in macros ppc_addi, ppc_addis, + ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd. + [__mono_ppc64__]: Make operand order and case consistent + (assembler order) for ppc_load_multiple_regs, + ppc_store_multiple_regs. Simplify the DS form and make them + consistent with D forms for ppc_load_reg, ppc_load_reg_update, + ppc_store_reg, ppc_store_reg_update. ppc_ld, ppc_lwa, ppc_ldu, + ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux. + 2009-01-19 Rodrigo Kumpera * x86/x86-codegen.h: Add x86_movd_xreg_membase. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index f1552a4..08bb9e4 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -125,7 +125,7 @@ enum { #define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0) -#define ppc_is_imm16(val) ((glong)(val) >= (glong)-(1L<<15) && (glong)(val) <= (glong)((1L<<15)-1L)) +#define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1)) #define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L) #define ppc_load32(c,D,v) G_STMT_START { \ @@ -150,16 +150,16 @@ enum { #define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V)) #define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (A), (d)) +#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) #define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) #define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) -#define ppc_load_multiple_regs(c,D,A,d) ppc_lmw ((c), (D), (A), (d)) +#define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A)) #define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) #define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) #define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) -#define ppc_store_multiple_regs(c,S,A,D) ppc_stmw ((c), (S), (A), (D)) +#define ppc_store_multiple_regs(c,S,d,A) ppc_stmw ((c), (S), (d), (A)) #define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B)) #define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B)) @@ -183,20 +183,20 @@ enum { #define ppc_split_5_1(x) ((ppc_split_5_1_5(x) << 1) | ppc_split_5_1_1(x)) #define ppc_break(c) ppc_tw((c),31,0,0) -#define ppc_addi(c,D,A,d) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) -#define ppc_addis(c,D,A,d) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_addi(c,D,A,i) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i)) +#define ppc_addis(c,D,A,i) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i)) #define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v)) #define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v)) -#define ppc_lwz(c,D,d,a) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) -#define ppc_lhz(c,D,d,a) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) -#define 
ppc_lbz(c,D,d,a) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d)) -#define ppc_stw(c,S,d,a) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) -#define ppc_sth(c,S,d,a) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) -#define ppc_stb(c,S,d,a) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d)) -#define ppc_stwu(c,s,d,a) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((a) << 16) | (guint16)(d)) +#define ppc_lwz(c,D,d,A) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_lhz(c,D,d,A) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_lbz(c,D,d,A) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_stw(c,S,d,A) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_sth(c,S,d,A) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_stb(c,S,d,A) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_stwu(c,s,d,A) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((A) << 16) | (guint16)(d)) #define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888) #define ppc_mr(c,a,s) ppc_or (c, a, s, s) -#define ppc_ori(c,S,A,u) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(u)) +#define ppc_ori(c,S,A,ui) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(ui)) #define ppc_nop(c) ppc_ori (c, 0, 0, 0) #define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1)) #define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr) @@ -257,8 +257,8 @@ my and Ximian's copyright to this code. ;) #define ppc_addeo(c,D,A,B) ppc_addex(c,D,A,B,1,0) #define ppc_addeod(c,D,A,B) ppc_addex(c,D,A,B,1,1) -#define ppc_addic(c,D,A,d) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) -#define ppc_addicd(c,D,A,d) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d)) +#define ppc_addic(c,D,A,i) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i)) +#define ppc_addicd(c,D,A,i) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i)) #define ppc_addmex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (234 << 1) | RC) #define ppc_addme(c,D,A) ppc_addmex(c,D,A,0,0) @@ -280,8 +280,8 @@ my and Ximian's copyright to this code. ;) #define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0) #define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1) -#define ppc_andid(c,S,A,d) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(d))) -#define ppc_andisd(c,S,A,d) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(d))) +#define ppc_andid(c,S,A,ui) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui))) +#define ppc_andisd(c,S,A,ui) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui))) #define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK) #define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0) @@ -477,36 +477,36 @@ my and Ximian's copyright to this code. 
;) #define ppc_isync(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (150 << 1) | 0) -#define ppc_lbzu(c,D,A,d) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lbzu(c,D,d,A) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lbzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (119 << 1) | 0) #define ppc_lbzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (87 << 1) | 0) -#define ppc_lfdu(c,D,A,d) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lfdu(c,D,d,A) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lfdux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (631 << 1) | 0) #define ppc_lfdx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (599 << 1) | 0) -#define ppc_lfsu(c,D,A,d) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d) -#define ppc_lfsux(c,D,A,d) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (567 << 1) | 0) -#define ppc_lfsx(c,D,A,d) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (535 << 1) | 0) +#define ppc_lfsu(c,D,d,A) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lfsux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (567 << 1) | 0) +#define ppc_lfsx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (535 << 1) | 0) -#define ppc_lha(c,D,A,d) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d) -#define ppc_lhau(c,D,A,d) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lha(c,D,d,A) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lhau(c,D,d,A) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lhaux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (375 << 1) | 0) #define ppc_lhax(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (343 << 1) | 0) #define ppc_lhbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (790 << 1) | 0) -#define ppc_lhzu(c,D,A,d) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lhzu(c,D,d,A) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lhzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (311 << 1) | 0) #define ppc_lhzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (279 << 1) | 0) -#define ppc_lmw(c,D,A,d) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lmw(c,D,d,A) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lswi(c,D,A,NB) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (NB << 11) | (597 << 1) | 0) #define ppc_lswx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (533 << 1) | 0) #define ppc_lwarx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (20 << 1) | 0) #define ppc_lwbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (534 << 1) | 0) -#define ppc_lwzu(c,D,A,d) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d) +#define ppc_lwzu(c,D,d,A) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d) #define ppc_lwzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (55 << 1) | 0) #define ppc_lwzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (23 << 1) | 0) @@ -628,24 +628,24 @@ my and Ximian's copyright to this 
code. ;) #define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0) #define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1) -#define ppc_stbu(c,S,A,D) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(D)) +#define ppc_stbu(c,S,d,A) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(d)) #define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0) #define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0) -#define ppc_stfdu(c,S,A,D) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(D)) +#define ppc_stfdu(c,S,d,A) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(d)) #define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0) #define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0) -#define ppc_stfsu(c,S,A,D) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(D)) +#define ppc_stfsu(c,S,d,A) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(d)) #define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0) #define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0) #define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0) -#define ppc_sthu(c,S,A,D) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(D)) +#define ppc_sthu(c,S,d,A) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(d)) #define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0) #define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0) -#define ppc_stmw(c,S,A,D) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)D) +#define ppc_stmw(c,S,d,A) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)d) #define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0) #define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0) #define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0) @@ -718,13 +718,23 @@ my and Ximian's copyright to this code. ;) #define PPC_LOAD_SEQUENCE_LENGTH 20 -#define ppc_is_imm32(val) ((glong)(val) >= (glong)-(1L<<31) && (glong)(val) <= (glong)((1L<<31)-1)) +#define ppc_is_imm32(val) (((((long)val)>> 31) == 0) || ((((long)val)>> 31) == -1)) +#define ppc_is_imm48(val) (((((long)val)>> 47) == 0) || ((((long)val)>> 47) == -1)) + +#define ppc_load48(c,D,v) G_STMT_START { \ + ppc_li ((c), (D), ((gint64)(v) >> 32) & 0xffff); \ + ppc_sldi ((c), (D), (D), 32); \ + ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \ + ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \ + } G_STMT_END #define ppc_load(c,D,v) G_STMT_START { \ if (ppc_is_imm16 ((gulong)(v))) { \ ppc_li ((c), (D), (guint16)(guint64)(v)); \ } else if (ppc_is_imm32 ((gulong)(v))) { \ ppc_load32 ((c), (D), (guint32)(guint64)(v)); \ + } else if (ppc_is_imm48 ((gulong)(v))) { \ + ppc_load48 ((c), (D), (guint64)(v)); \ } else { \ ppc_load_sequence ((c), (D), (guint64)(v)); \ } \ @@ -736,11 +746,11 @@ my and Ximian's copyright to this code. 
;) ppc_load_reg ((c), (D), 0, ppc_r11); \ } G_STMT_END -#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d) >> 2, (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d) >> 2, (A)) +#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d), (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A)) #define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) #define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) -#define ppc_load_multiple_regs(c,D,A,d) G_STMT_START { \ +#define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \ int __i, __o = (d); \ for (__i = (D); __i <= 31; ++__i) { \ ppc_load_reg ((c), __i, __o, (A)); \ @@ -748,12 +758,12 @@ my and Ximian's copyright to this code. ;) } \ } G_STMT_END -#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d) >> 2, (A)) -#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d) >> 2, (A)) +#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d), (A)) +#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) #define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) #define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) -#define ppc_store_multiple_regs(c,S,A,D) G_STMT_START { \ - int __i, __o = (D); \ +#define ppc_store_multiple_regs(c,S,d,A) G_STMT_START { \ + int __i, __o = (d); \ for (__i = (S); __i <= 31; ++__i) { \ ppc_store_reg ((c), __i, __o, (A)); \ __o += sizeof (gulong); \ @@ -802,11 +812,14 @@ my and Ximian's copyright to this code. ;) #define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0) #define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1) -#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 0) +#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0) +#define ppc_lwa(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((ds) & 0xfffc) | 2) #define ppc_ldarx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (84 << 1) | 0) -#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 1) +#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1) #define ppc_ldux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (53 << 1) | 0) +#define ppc_lwaux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (373 << 1) | 0) #define ppc_ldx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (21 << 1) | 0) +#define ppc_lwax(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (341 << 1) | 0) #define ppc_mulhdx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (73 << 1) | (Rc)) #define ppc_mulhd(c,D,A,B) ppc_mulhdx(c,D,A,B,0) @@ -871,9 +884,9 @@ my and Ximian's copyright to this code. 
;) #define ppc_srd(c,A,S,B) ppc_srdx(c,S,A,B,0) #define ppc_srdd(c,A,S,B) ppc_srdx(c,S,A,B,1) -#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 0) +#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0) #define ppc_stdcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (214 << 1) | 1) -#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | (guint16)((ds) << 2) | 1) +#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1) #define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0) #define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0) -- cgit v1.1 From 22e6e9728faa11a87a7f6f0f0ff0f0f8ef754c03 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 27 Feb 2009 06:21:52 +0000 Subject: 2009-02-27 Zoltan Varga * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are autogenerated. svn path=/trunk/mono/; revision=128179 --- ChangeLog | 5 + arm/arm_fpamacros.h | 419 ---------------------------------------------------- arm/arm_vfpmacros.h | 299 ------------------------------------- 3 files changed, 5 insertions(+), 718 deletions(-) delete mode 100644 arm/arm_fpamacros.h delete mode 100644 arm/arm_vfpmacros.h diff --git a/ChangeLog b/ChangeLog index ceef7ff..a6b7408 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2009-02-27 Zoltan Varga + + * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are + autogenerated. + 2009-02-02 Mark Probst Contributed under the terms of the MIT/X11 license by Steven diff --git a/arm/arm_fpamacros.h b/arm/arm_fpamacros.h deleted file mode 100644 index 5de16f2..0000000 --- a/arm/arm_fpamacros.h +++ /dev/null @@ -1,419 +0,0 @@ -/* Macros for FPA ops, auto-generated from template */ - - -/* dyadic */ - -/* -- ADF -- */ - - -/* Fd := Rn ADF Rm */ -#define ARM_FPA_ADFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_ADF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_ADFD(p, rd, rn, rm) \ - ARM_FPA_ADFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_ADFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_ADF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_ADFS(p, rd, rn, rm) \ - ARM_FPA_ADFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- MUF -- */ - - -/* Fd := Rn MUF Rm */ -#define ARM_FPA_MUFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_MUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_MUFD(p, rd, rn, rm) \ - ARM_FPA_MUFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_MUFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_MUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_MUFS(p, rd, rn, rm) \ - ARM_FPA_MUFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- SUF -- */ - - -/* Fd := Rn SUF Rm */ -#define ARM_FPA_SUFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_SUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_SUFD(p, rd, rn, rm) \ - ARM_FPA_SUFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_SUFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), 
ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_SUF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_SUFS(p, rd, rn, rm) \ - ARM_FPA_SUFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- RSF -- */ - - -/* Fd := Rn RSF Rm */ -#define ARM_FPA_RSFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RSF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_RSFD(p, rd, rn, rm) \ - ARM_FPA_RSFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_RSFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RSF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_RSFS(p, rd, rn, rm) \ - ARM_FPA_RSFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- DVF -- */ - - -/* Fd := Rn DVF Rm */ -#define ARM_FPA_DVFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_DVF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_DVFD(p, rd, rn, rm) \ - ARM_FPA_DVFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_DVFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_DVF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_DVFS(p, rd, rn, rm) \ - ARM_FPA_DVFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- RDF -- */ - - -/* Fd := Rn RDF Rm */ -#define ARM_FPA_RDFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RDF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_RDFD(p, rd, rn, rm) \ - ARM_FPA_RDFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_RDFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RDF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_RDFS(p, rd, rn, rm) \ - ARM_FPA_RDFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- POW -- */ - - -/* Fd := Rn POW Rm */ -#define ARM_FPA_POWD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_POWD(p, rd, rn, rm) \ - ARM_FPA_POWD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_POWS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_POWS(p, rd, rn, rm) \ - ARM_FPA_POWS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- RPW -- */ - - -/* Fd := Rn RPW Rm */ -#define ARM_FPA_RPWD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RPW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_RPWD(p, rd, rn, rm) \ - ARM_FPA_RPWD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_RPWS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RPW,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_RPWS(p, rd, rn, rm) \ - ARM_FPA_RPWS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- RMF -- */ - - -/* Fd := Rn RMF Rm */ -#define ARM_FPA_RMFD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RMF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_RMFD(p, rd, rn, rm) \ - ARM_FPA_RMFD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_RMFS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_RMF,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_RMFS(p, rd, rn, rm) \ - ARM_FPA_RMFS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- FML -- */ - - -/* Fd := Rn FML Rm */ -#define ARM_FPA_FMLD_COND(p, rd, rn, rm, cond) \ - 
ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FML,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_FMLD(p, rd, rn, rm) \ - ARM_FPA_FMLD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_FMLS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FML,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_FMLS(p, rd, rn, rm) \ - ARM_FPA_FMLS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- FDV -- */ - - -/* Fd := Rn FDV Rm */ -#define ARM_FPA_FDVD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FDV,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_FDVD(p, rd, rn, rm) \ - ARM_FPA_FDVD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_FDVS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FDV,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_FDVS(p, rd, rn, rm) \ - ARM_FPA_FDVS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- FRD -- */ - - -/* Fd := Rn FRD Rm */ -#define ARM_FPA_FRDD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FRD,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_FRDD(p, rd, rn, rm) \ - ARM_FPA_FRDD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_FRDS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_FRD,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_FRDS(p, rd, rn, rm) \ - ARM_FPA_FRDS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- POL -- */ - - -/* Fd := Rn POL Rm */ -#define ARM_FPA_POLD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POL,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_POLD(p, rd, rn, rm) \ - ARM_FPA_POLD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_POLS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_POL,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_POLS(p, rd, rn, rm) \ - ARM_FPA_POLS_COND(p, rd, rn, rm, ARMCOND_AL) - - - -/* monadic */ - -/* -- MVF -- */ - - -/* Fd := MVF Rm */ - -#define ARM_MVFD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MVF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_MVFD(p,dreg,sreg) ARM_MVFD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_MVFS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MVF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_MVFS(p,dreg,sreg) ARM_MVFS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- MNF -- */ - - -/* Fd := MNF Rm */ - -#define ARM_MNFD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MNF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_MNFD(p,dreg,sreg) ARM_MNFD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_MNFS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_MNF,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_MNFS(p,dreg,sreg) ARM_MNFS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- ABS -- */ - - -/* Fd := ABS Rm */ - -#define ARM_ABSD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ABS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_ABSD(p,dreg,sreg) ARM_ABSD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_ABSS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), 
ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ABS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_ABSS(p,dreg,sreg) ARM_ABSS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- RND -- */ - - -/* Fd := RND Rm */ - -#define ARM_RNDD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_RNDD(p,dreg,sreg) ARM_RNDD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_RNDS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_RNDS(p,dreg,sreg) ARM_RNDS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- SQT -- */ - - -/* Fd := SQT Rm */ - -#define ARM_SQTD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SQT,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_SQTD(p,dreg,sreg) ARM_SQTD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_SQTS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SQT,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_SQTS(p,dreg,sreg) ARM_SQTS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- LOG -- */ - - -/* Fd := LOG Rm */ - -#define ARM_LOGD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_LOG,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_LOGD(p,dreg,sreg) ARM_LOGD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_LOGS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_LOG,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_LOGS(p,dreg,sreg) ARM_LOGS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- EXP -- */ - - -/* Fd := EXP Rm */ - -#define ARM_EXPD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_EXP,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_EXPD(p,dreg,sreg) ARM_EXPD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_EXPS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_EXP,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_EXPS(p,dreg,sreg) ARM_EXPS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- SIN -- */ - - -/* Fd := SIN Rm */ - -#define ARM_SIND_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SIN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_SIND(p,dreg,sreg) ARM_SIND_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_SINS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_SIN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_SINS(p,dreg,sreg) ARM_SINS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- COS -- */ - - -/* Fd := COS Rm */ - -#define ARM_COSD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_COS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_COSD(p,dreg,sreg) ARM_COSD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_COSS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_COS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_COSS(p,dreg,sreg) ARM_COSS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- TAN -- */ - - -/* Fd := TAN Rm */ - -#define ARM_TAND_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_TAN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_TAND(p,dreg,sreg) ARM_TAND_COND(p,dreg,sreg,ARMCOND_AL) - -#define 
ARM_TANS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_TAN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_TANS(p,dreg,sreg) ARM_TANS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- ASN -- */ - - -/* Fd := ASN Rm */ - -#define ARM_ASND_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ASN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_ASND(p,dreg,sreg) ARM_ASND_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_ASNS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ASN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_ASNS(p,dreg,sreg) ARM_ASNS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- ACS -- */ - - -/* Fd := ACS Rm */ - -#define ARM_ACSD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ACS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_ACSD(p,dreg,sreg) ARM_ACSD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_ACSS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ACS,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_ACSS(p,dreg,sreg) ARM_ACSS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- ATN -- */ - - -/* Fd := ATN Rm */ - -#define ARM_ATND_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ATN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_ATND(p,dreg,sreg) ARM_ATND_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_ATNS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_ATN,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_ATNS(p,dreg,sreg) ARM_ATNS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- URD -- */ - - -/* Fd := URD Rm */ - -#define ARM_URDD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_URD,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_URDD(p,dreg,sreg) ARM_URDD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_URDS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_URD,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_URDS(p,dreg,sreg) ARM_URDS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- NRM -- */ - - -/* Fd := NRM Rm */ - -#define ARM_NRMD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_NRM,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_NRMD(p,dreg,sreg) ARM_NRMD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_NRMS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_NRM,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_NRMS(p,dreg,sreg) ARM_NRMS_COND(p,dreg,sreg,ARMCOND_AL) - - - - - - -/* end generated */ - diff --git a/arm/arm_vfpmacros.h b/arm/arm_vfpmacros.h deleted file mode 100644 index f502645..0000000 --- a/arm/arm_vfpmacros.h +++ /dev/null @@ -1,299 +0,0 @@ -/* Macros for VFP ops, auto-generated from template */ - - -/* dyadic */ - -/* -- ADD -- */ - - -/* Fd := Fn ADD Fm */ -#define ARM_VFP_ADDD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_ADD,rd,rn,rm)) -#define ARM_VFP_ADDD(p, rd, rn, rm) \ - ARM_VFP_ADDD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_VFP_ADDS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_ADD,rd,rn,rm)) -#define ARM_VFP_ADDS(p, rd, rn, rm) \ - ARM_VFP_ADDS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- SUB -- 
*/ - - -/* Fd := Fn SUB Fm */ -#define ARM_VFP_SUBD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_SUB,rd,rn,rm)) -#define ARM_VFP_SUBD(p, rd, rn, rm) \ - ARM_VFP_SUBD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_VFP_SUBS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_SUB,rd,rn,rm)) -#define ARM_VFP_SUBS(p, rd, rn, rm) \ - ARM_VFP_SUBS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- MUL -- */ - - -/* Fd := Fn MUL Fm */ -#define ARM_VFP_MULD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_MUL,rd,rn,rm)) -#define ARM_VFP_MULD(p, rd, rn, rm) \ - ARM_VFP_MULD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_VFP_MULS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_MUL,rd,rn,rm)) -#define ARM_VFP_MULS(p, rd, rn, rm) \ - ARM_VFP_MULS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- NMUL -- */ - - -/* Fd := Fn NMUL Fm */ -#define ARM_VFP_NMULD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_NMUL,rd,rn,rm)) -#define ARM_VFP_NMULD(p, rd, rn, rm) \ - ARM_VFP_NMULD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_VFP_NMULS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_NMUL,rd,rn,rm)) -#define ARM_VFP_NMULS(p, rd, rn, rm) \ - ARM_VFP_NMULS_COND(p, rd, rn, rm, ARMCOND_AL) - - -/* -- DIV -- */ - - -/* Fd := Fn DIV Fm */ -#define ARM_VFP_DIVD_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_DIV,rd,rn,rm)) -#define ARM_VFP_DIVD(p, rd, rn, rm) \ - ARM_VFP_DIVD_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_VFP_DIVS_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_DIV,rd,rn,rm)) -#define ARM_VFP_DIVS(p, rd, rn, rm) \ - ARM_VFP_DIVS_COND(p, rd, rn, rm, ARMCOND_AL) - - - -/* monadic */ - -/* -- CPY -- */ - - -/* Fd := CPY Fm */ - -#define ARM_CPYD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CPY,(dreg),(sreg))) -#define ARM_CPYD(p,dreg,sreg) ARM_CPYD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CPYS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CPY,(dreg),(sreg))) -#define ARM_CPYS(p,dreg,sreg) ARM_CPYS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- ABS -- */ - - -/* Fd := ABS Fm */ - -#define ARM_ABSD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_ABS,(dreg),(sreg))) -#define ARM_ABSD(p,dreg,sreg) ARM_ABSD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_ABSS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_ABS,(dreg),(sreg))) -#define ARM_ABSS(p,dreg,sreg) ARM_ABSS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- NEG -- */ - - -/* Fd := NEG Fm */ - -#define ARM_NEGD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_NEG,(dreg),(sreg))) -#define ARM_NEGD(p,dreg,sreg) ARM_NEGD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_NEGS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_NEG,(dreg),(sreg))) -#define ARM_NEGS(p,dreg,sreg) ARM_NEGS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- SQRT -- */ - - -/* Fd := SQRT Fm */ - -#define ARM_SQRTD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_SQRT,(dreg),(sreg))) -#define 
ARM_SQRTD(p,dreg,sreg) ARM_SQRTD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_SQRTS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_SQRT,(dreg),(sreg))) -#define ARM_SQRTS(p,dreg,sreg) ARM_SQRTS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- CMP -- */ - - -/* Fd := CMP Fm */ - -#define ARM_CMPD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMP,(dreg),(sreg))) -#define ARM_CMPD(p,dreg,sreg) ARM_CMPD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CMPS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMP,(dreg),(sreg))) -#define ARM_CMPS(p,dreg,sreg) ARM_CMPS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- CMPE -- */ - - -/* Fd := CMPE Fm */ - -#define ARM_CMPED_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPE,(dreg),(sreg))) -#define ARM_CMPED(p,dreg,sreg) ARM_CMPED_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CMPES_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPE,(dreg),(sreg))) -#define ARM_CMPES(p,dreg,sreg) ARM_CMPES_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- CMPZ -- */ - - -/* Fd := CMPZ Fm */ - -#define ARM_CMPZD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPZ,(dreg),(sreg))) -#define ARM_CMPZD(p,dreg,sreg) ARM_CMPZD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CMPZS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPZ,(dreg),(sreg))) -#define ARM_CMPZS(p,dreg,sreg) ARM_CMPZS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- CMPEZ -- */ - - -/* Fd := CMPEZ Fm */ - -#define ARM_CMPEZD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CMPEZ,(dreg),(sreg))) -#define ARM_CMPEZD(p,dreg,sreg) ARM_CMPEZD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CMPEZS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CMPEZ,(dreg),(sreg))) -#define ARM_CMPEZS(p,dreg,sreg) ARM_CMPEZS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- CVT -- */ - - -/* Fd := CVT Fm */ - -#define ARM_CVTD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_CVT,(dreg),(sreg))) -#define ARM_CVTD(p,dreg,sreg) ARM_CVTD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_CVTS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_CVT,(dreg),(sreg))) -#define ARM_CVTS(p,dreg,sreg) ARM_CVTS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- UITO -- */ - - -/* Fd := UITO Fm */ - -#define ARM_UITOD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_UITO,(dreg),(sreg))) -#define ARM_UITOD(p,dreg,sreg) ARM_UITOD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_UITOS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_UITO,(dreg),(sreg))) -#define ARM_UITOS(p,dreg,sreg) ARM_UITOS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- SITO -- */ - - -/* Fd := SITO Fm */ - -#define ARM_SITOD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_SITO,(dreg),(sreg))) -#define ARM_SITOD(p,dreg,sreg) ARM_SITOD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_SITOS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_SITO,(dreg),(sreg))) -#define ARM_SITOS(p,dreg,sreg) ARM_SITOS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* 
-- TOUI -- */ - - -/* Fd := TOUI Fm */ - -#define ARM_TOUID_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOUI,(dreg),(sreg))) -#define ARM_TOUID(p,dreg,sreg) ARM_TOUID_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_TOUIS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOUI,(dreg),(sreg))) -#define ARM_TOUIS(p,dreg,sreg) ARM_TOUIS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- TOSI -- */ - - -/* Fd := TOSI Fm */ - -#define ARM_TOSID_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOSI,(dreg),(sreg))) -#define ARM_TOSID(p,dreg,sreg) ARM_TOSID_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_TOSIS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOSI,(dreg),(sreg))) -#define ARM_TOSIS(p,dreg,sreg) ARM_TOSIS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- TOUIZ -- */ - - -/* Fd := TOUIZ Fm */ - -#define ARM_TOUIZD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOUIZ,(dreg),(sreg))) -#define ARM_TOUIZD(p,dreg,sreg) ARM_TOUIZD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_TOUIZS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOUIZ,(dreg),(sreg))) -#define ARM_TOUIZS(p,dreg,sreg) ARM_TOUIZS_COND(p,dreg,sreg,ARMCOND_AL) - - -/* -- TOSIZ -- */ - - -/* Fd := TOSIZ Fm */ - -#define ARM_TOSIZD_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_TOSIZ,(dreg),(sreg))) -#define ARM_TOSIZD(p,dreg,sreg) ARM_TOSIZD_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_TOSIZS_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_TOSIZ,(dreg),(sreg))) -#define ARM_TOSIZS(p,dreg,sreg) ARM_TOSIZS_COND(p,dreg,sreg,ARMCOND_AL) - - - - - - -/* end generated */ - -- cgit v1.1 From a7f6dd7620d7c440216c0f156bcd969a28a592d4 Mon Sep 17 00:00:00 2001 From: Martin Baulig Date: Sat, 28 Feb 2009 14:36:50 +0000 Subject: Create .gitignore's. 
svn path=/trunk/mono/; revision=128265 --- .gitignore | 6 ++++++ alpha/.gitignore | 4 ++++ amd64/.gitignore | 4 ++++ arm/.gitignore | 13 +++++++++++++ hppa/.gitignore | 3 +++ ia64/.gitignore | 2 ++ mips/.gitignore | 6 ++++++ ppc/.gitignore | 7 +++++++ s390/.gitignore | 4 ++++ s390x/.gitignore | 6 ++++++ sparc/.gitignore | 3 +++ x86/.gitignore | 6 ++++++ 12 files changed, 64 insertions(+) create mode 100644 .gitignore create mode 100644 alpha/.gitignore create mode 100644 amd64/.gitignore create mode 100644 arm/.gitignore create mode 100644 hppa/.gitignore create mode 100644 ia64/.gitignore create mode 100644 mips/.gitignore create mode 100644 ppc/.gitignore create mode 100644 s390/.gitignore create mode 100644 s390x/.gitignore create mode 100644 sparc/.gitignore create mode 100644 x86/.gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..16c9840 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +/Makefile +/Makefile.in +/.deps +/.libs +/*.la +/*.lo diff --git a/alpha/.gitignore b/alpha/.gitignore new file mode 100644 index 0000000..6abcd22 --- /dev/null +++ b/alpha/.gitignore @@ -0,0 +1,4 @@ +/Makefile.in +/Makefile +/.deps +/.cvsignore diff --git a/amd64/.gitignore b/amd64/.gitignore new file mode 100644 index 0000000..6930f61 --- /dev/null +++ b/amd64/.gitignore @@ -0,0 +1,4 @@ +/Makefile.in +/Makefile +/.deps +/.libs diff --git a/arm/.gitignore b/arm/.gitignore new file mode 100644 index 0000000..0b0e716 --- /dev/null +++ b/arm/.gitignore @@ -0,0 +1,13 @@ +/Makefile +/Makefile.in +/.deps +/.libs +/*.o +/*.la +/*.lo +/*.lib +/*.obj +/*.exe +/*.dll +/arm_dpimacros.h +/fixeol.sh diff --git a/hppa/.gitignore b/hppa/.gitignore new file mode 100644 index 0000000..dc1ebd2 --- /dev/null +++ b/hppa/.gitignore @@ -0,0 +1,3 @@ +/Makefile +/Makefile.in +/.deps diff --git a/ia64/.gitignore b/ia64/.gitignore new file mode 100644 index 0000000..b336cc7 --- /dev/null +++ b/ia64/.gitignore @@ -0,0 +1,2 @@ +/Makefile +/Makefile.in diff --git a/mips/.gitignore b/mips/.gitignore new file mode 100644 index 0000000..13efac7 --- /dev/null +++ b/mips/.gitignore @@ -0,0 +1,6 @@ +/ +/Makefile +/Makefile.in +/*.o +/*.lo +/.deps diff --git a/ppc/.gitignore b/ppc/.gitignore new file mode 100644 index 0000000..c577ff6 --- /dev/null +++ b/ppc/.gitignore @@ -0,0 +1,7 @@ +/Makefile +/Makefile.in +/.libs +/.deps +/*.la +/*.lo +/test diff --git a/s390/.gitignore b/s390/.gitignore new file mode 100644 index 0000000..6abcd22 --- /dev/null +++ b/s390/.gitignore @@ -0,0 +1,4 @@ +/Makefile.in +/Makefile +/.deps +/.cvsignore diff --git a/s390x/.gitignore b/s390x/.gitignore new file mode 100644 index 0000000..341daec --- /dev/null +++ b/s390x/.gitignore @@ -0,0 +1,6 @@ +/Makefile +/Makefile.in +/.libs +/.deps +/*.la +/*.lo diff --git a/sparc/.gitignore b/sparc/.gitignore new file mode 100644 index 0000000..dc1ebd2 --- /dev/null +++ b/sparc/.gitignore @@ -0,0 +1,3 @@ +/Makefile +/Makefile.in +/.deps diff --git a/x86/.gitignore b/x86/.gitignore new file mode 100644 index 0000000..341daec --- /dev/null +++ b/x86/.gitignore @@ -0,0 +1,6 @@ +/Makefile +/Makefile.in +/.libs +/.deps +/*.la +/*.lo -- cgit v1.1 From 7c682141c5861685e5b0efdcc1f337083657cf9d Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 6 Mar 2009 15:55:12 +0000 Subject: 2009-03-06 Zoltan Varga * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing a file in the middle of a function. 
svn path=/trunk/mono/; revision=128782 --- ChangeLog | 5 +++++ arm/tramp.c | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index a6b7408..539ef22 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2009-03-06 Zoltan Varga + + * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing + a file in the middle of a function. + 2009-02-27 Zoltan Varga * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are diff --git a/arm/tramp.c b/arm/tramp.c index c2a1c20..f736c7a 100644 --- a/arm/tramp.c +++ b/arm/tramp.c @@ -10,6 +10,9 @@ #if defined(_WIN32_WCE) || defined (UNDER_CE) # include +#else +#include +#include #endif #if !defined(PLATFORM_MACOSX) @@ -76,8 +79,6 @@ void* alloc_code_buff (int num_instr) code_buff = malloc(code_size); VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot); #else -#include -#include int page_size = sysconf(_SC_PAGESIZE); int new_code_size; -- cgit v1.1 From 9f497af70ef5ed9244ffbe9a6263f7d077136148 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 2 Apr 2009 00:50:47 +0000 Subject: 2009-04-02 Zoltan Varga * arm/arm-vfp-codegen.h: Add missing VFP codegen macros. svn path=/trunk/mono/; revision=130817 --- ChangeLog | 4 ++++ arm/arm-vfp-codegen.h | 43 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 539ef22..63293fa 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2009-04-02 Zoltan Varga + + * arm/arm-vfp-codegen.h: Add missing VFP codegen macros. + 2009-03-06 Zoltan Varga * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index fe15dd0..6396467 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -175,8 +175,47 @@ enum { #define ARM_FMXR(p,freg,reg) \ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg))) -#define ARM_FMRX(p,reg,freg) \ - ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(freg),(reg))) +#define ARM_FMRX(p,reg,fcreg) \ + ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(fcreg),(reg))) + +#define ARM_FMSTAT(p) \ + ARM_FMRX((p),ARMREG_R15,ARM_VFP_SCR) + +#define ARM_DEF_MCRR(cond,cp,rn,rd,Fm,M) \ + ((Fm) << 0) | \ + (1 << 4) | \ + ((M) << 5) | \ + ((cp) << 8) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((2) << 21) | \ + (12 << 24) | \ + ARM_DEF_COND(cond) + +#define ARM_FMDRR(p,rd,rn,dm) \ + ARM_EMIT((p), ARM_DEF_MCRR(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1)) + +#define ARM_DEF_FMRRD(cond,cp,rn,rd,Dm,D) \ + ((Dm) << 0) | \ + (1 << 4) | \ + ((cp) << 8) | \ + ((rd) << 12) | \ + ((rn) << 16) | \ + ((0xc5) << 20) | \ + ARM_DEF_COND(cond) + +#define ARM_FMRRD(p,rd,rn,dm) \ + ARM_EMIT((p), ARM_DEF_FMRRD(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1)) + +#define ARM_DEF_FUITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0) + +#define ARM_FUITOS(p,dreg,sreg) \ + ARM_EMIT((p), ARM_DEF_FUITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1)) + +#define ARM_DEF_FUITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0) + +#define ARM_FUITOD(p,dreg,sreg) \ + ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 
1)) #endif /* __MONO_ARM_VFP_CODEGEN_H__ */ -- cgit v1.1 From 7b7235494cabe7c5a796fafd6297070f993b03a8 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 2 Apr 2009 22:37:35 +0000 Subject: 2009-04-03 Zoltan Varga * amd64/amd64-codegen.h: Add macros for decoding the SIB byte. svn path=/trunk/mono/; revision=130910 --- ChangeLog | 4 ++++ amd64/amd64-codegen.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/ChangeLog b/ChangeLog index 63293fa..ba168ca 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2009-04-03 Zoltan Varga + + * amd64/amd64-codegen.h: Add macros for decoding the SIB byte. + 2009-04-02 Zoltan Varga * arm/arm-vfp-codegen.h: Add missing VFP codegen macros. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 6ef62c5..a5c987a 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -129,6 +129,10 @@ typedef union { #define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) #define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) +#define amd64_sib_scale(sib) ((sib) >> 6) +#define amd64_sib_index(sib) (((sib) >> 3) & 0x7) +#define amd64_sib_base(sib) ((sib) & 0x7) + #define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ -- cgit v1.1 From 965b554666f2999b9e01dd731b1134af1cfcd5fa Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 6 Apr 2009 15:09:57 +0000 Subject: 2009-04-06 Zoltan Varga * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD. svn path=/trunk/mono/; revision=131125 --- ChangeLog | 4 ++++ arm/arm-vfp-codegen.h | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/ChangeLog b/ChangeLog index ba168ca..0c74083 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2009-04-06 Zoltan Varga + + * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD. + 2009-04-03 Zoltan Varga * amd64/amd64-codegen.h: Add macros for decoding the SIB byte. 
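A minimal usage sketch for the SIB-decoding macros added above (illustrative only: the 0x94 value is an arbitrary example, and the includes are assumed; this is not part of the patch series):

#include <stdio.h>
#include "amd64-codegen.h"

static void
decode_sib (unsigned char sib)
{
	/* e.g. sib = 0x94 = 10 010 100b -> scale 2 (index * 4), index 2 (rdx), base 4 (rsp);
	 * REX.X/REX.B (see amd64_rex_x/amd64_rex_b above) extend index/base to r8-r15. */
	printf ("scale=%d index=%d base=%d\n",
		amd64_sib_scale (sib), amd64_sib_index (sib), amd64_sib_base (sib));
}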
diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index 6396467..7916957 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -217,5 +217,15 @@ enum { #define ARM_FUITOD(p,dreg,sreg) \ ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1)) +#define ARM_DEF_FSITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0) + +#define ARM_FSITOS(p,dreg,sreg) \ + ARM_EMIT((p), ARM_DEF_FSITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1)) + +#define ARM_DEF_FSITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0) + +#define ARM_FSITOD(p,dreg,sreg) \ + ARM_EMIT((p), ARM_DEF_FSITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1)) + #endif /* __MONO_ARM_VFP_CODEGEN_H__ */ -- cgit v1.1 From 76cddabf0319c7be9fae2b6c532aafe6587fafbc Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Wed, 22 Apr 2009 23:59:10 +0000 Subject: merge svn path=/trunk/mono/; revision=132427 --- ppc/tramp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ppc/tramp.c b/ppc/tramp.c index abf5397..6bb1896 100644 --- a/ppc/tramp.c +++ b/ppc/tramp.c @@ -63,7 +63,7 @@ disassemble (guint8 *code, int size) { int i; FILE *ofd; - const char *tmp = getenv("TMP"); + const char *tmp = g_getenv("TMP"); char *as_file; char *o_file; char *cmd; -- cgit v1.1 From 9629536810d07a63b980a29912eaf3df7313fee9 Mon Sep 17 00:00:00 2001 From: Jerri Maine Date: Fri, 12 Jun 2009 17:33:11 +0000 Subject: Add macros for coding two byte SIMD/SSE opcodes. Added comments to help tell the different types of SSE code gen macros apart. svn path=/trunk/mono/; revision=136018 --- ChangeLog | 8 ++++++++ amd64/amd64-codegen.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/ChangeLog b/ChangeLog index 0c74083..2b4a92a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,11 @@ +2009-10-06 Jerry Maine + + Contributed under the terms of the MIT/X11 license by + Jerry Maine . + + * amd64/amd64-codegen.h: Add macros for coding two byte SIMD/SSE opcodes. + Added comments to help tell the different types of SSE code gen macros apart. + 2009-04-06 Zoltan Varga * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD. diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index a5c987a..87b4dc4 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -504,6 +504,33 @@ typedef union { * SSE */ +/* Two opcode SSE defines */ + +#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x86_reg_emit ((inst), (dreg), (reg)); \ +} while (0) + +#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) + +#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ + amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ +} while (0) + +#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ + amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ +} while (0) + +/* Three opcode SSE defines */ + #define emit_opcode3(inst,op1,op2,op3) do { \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ @@ -536,6 +563,8 @@ typedef union { amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ } while (0) +/* specific SSE opcode defines */ + #define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) #define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) -- cgit v1.1 From 3858973d0bd980206ea3725a2e74f2a336aa1aa1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 20 Jun 2009 13:04:42 +0000 Subject: 2009-06-20 Zoltan Varga * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities. Handle little endian host platforms in ppc_emit32. svn path=/trunk/mono/; revision=136539 --- ChangeLog | 5 +++ ppc/ppc-codegen.h | 98 ++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 77 insertions(+), 26 deletions(-) diff --git a/ChangeLog b/ChangeLog index 2b4a92a..f8bb22e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +2009-06-20 Zoltan Varga + + * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities. + Handle little endian host platforms in ppc_emit32. + 2009-10-06 Jerry Maine Contributed under the terms of the MIT/X11 license by diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 08bb9e4..c80cba9 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -123,7 +123,7 @@ enum { PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ }; -#define ppc_emit32(c,x) do { *((guint32 *) (c)) = x; (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0) +#define ppc_emit32(c,x) do { *((guint32 *) (c)) = GUINT32_TO_BE (x); (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0) #define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1)) #define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L) @@ -133,6 +133,58 @@ enum { ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \ } G_STMT_END +/* Macros to load/store pointer sized quantities */ + +#if defined(__mono_ppc64__) && !defined(__mono_ilp32__) + +#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d), (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A)) +#define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) +#define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) + +#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d), (A)) +#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) +#define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) +#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) + +#else + +/* Same as ppc32 */ +#define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) +#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) +#define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) +#define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) + +#define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) +#define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) +#define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) +#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) + +#endif + +/* Macros to load/store regsize quantities */ + +#ifdef 
__mono_ppc64__ +#define ppc_ldr(c,D,d,A) ppc_ld ((c), (D), (d), (A)) +#define ppc_ldr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) +#define ppc_str(c,S,d,A) ppc_std ((c), (S), (d), (A)) +#define ppc_str_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) +#define ppc_str_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) +#define ppc_str_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) +#else +#define ppc_ldr(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) +#define ppc_ldr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) +#define ppc_str(c,S,d,A) ppc_stw ((c), (S), (d), (A)) +#define ppc_str_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) +#define ppc_str_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) +#define ppc_str_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) +#endif + +#define ppc_str_multiple(c,S,d,A) ppc_store_multiple_regs((c),(S),(d),(A)) +#define ppc_ldr_multiple(c,D,d,A) ppc_load_multiple_regs((c),(D),(d),(A)) + +/* PPC32 macros */ + #ifndef __mono_ppc64__ #define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v)) @@ -709,17 +761,18 @@ my and Ximian's copyright to this code. ;) #ifdef __mono_ppc64__ #define ppc_load_sequence(c,D,v) G_STMT_START { \ - ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \ - ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \ + guint64 val = (guint64)(v); \ + ppc_lis ((c), (D), (val >> 48) & 0xffff); \ + ppc_ori ((c), (D), (D), (val >> 32) & 0xffff); \ ppc_sldi ((c), (D), (D), 32); \ - ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \ - ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \ + ppc_oris ((c), (D), (D), (val >> 16) & 0xffff); \ + ppc_ori ((c), (D), (D), val & 0xffff); \ } G_STMT_END #define PPC_LOAD_SEQUENCE_LENGTH 20 -#define ppc_is_imm32(val) (((((long)val)>> 31) == 0) || ((((long)val)>> 31) == -1)) -#define ppc_is_imm48(val) (((((long)val)>> 47) == 0) || ((((long)val)>> 47) == -1)) +#define ppc_is_imm32(val) (((((gint64)val)>> 31) == 0) || ((((gint64)val)>> 31) == -1)) +#define ppc_is_imm48(val) (((((gint64)val)>> 47) == 0) || ((((gint64)val)>> 47) == -1)) #define ppc_load48(c,D,v) G_STMT_START { \ ppc_li ((c), (D), ((gint64)(v) >> 32) & 0xffff); \ @@ -729,14 +782,15 @@ my and Ximian's copyright to this code. ;) } G_STMT_END #define ppc_load(c,D,v) G_STMT_START { \ - if (ppc_is_imm16 ((gulong)(v))) { \ - ppc_li ((c), (D), (guint16)(guint64)(v)); \ - } else if (ppc_is_imm32 ((gulong)(v))) { \ - ppc_load32 ((c), (D), (guint32)(guint64)(v)); \ - } else if (ppc_is_imm48 ((gulong)(v))) { \ - ppc_load48 ((c), (D), (guint64)(v)); \ + guint64 val = (guint64)(v); \ + if (ppc_is_imm16 (val)) { \ + ppc_li ((c), (D), val); \ + } else if (ppc_is_imm32 (val)) { \ + ppc_load32 ((c), (D), val); \ + } else if (ppc_is_imm48 (val)) { \ + ppc_load48 ((c), (D), val); \ } else { \ - ppc_load_sequence ((c), (D), (guint64)(v)); \ + ppc_load_sequence ((c), (D), val); \ } \ } G_STMT_END @@ -746,27 +800,19 @@ my and Ximian's copyright to this code. 
;) ppc_load_reg ((c), (D), 0, ppc_r11); \ } G_STMT_END -#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A)) -#define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) -#define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) #define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \ int __i, __o = (d); \ for (__i = (D); __i <= 31; ++__i) { \ - ppc_load_reg ((c), __i, __o, (A)); \ - __o += sizeof (gulong); \ + ppc_ldr ((c), __i, __o, (A)); \ + __o += sizeof (guint64); \ } \ } G_STMT_END -#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d), (A)) -#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) -#define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) -#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) #define ppc_store_multiple_regs(c,S,d,A) G_STMT_START { \ int __i, __o = (d); \ for (__i = (S); __i <= 31; ++__i) { \ - ppc_store_reg ((c), __i, __o, (A)); \ - __o += sizeof (gulong); \ + ppc_str ((c), __i, __o, (A)); \ + __o += sizeof (guint64); \ } \ } G_STMT_END -- cgit v1.1 From cf0e113f7dd91ff8b46e35047cc48c2e5ece925c Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 20 Jun 2009 18:47:03 +0000 Subject: 2009-06-20 Zoltan Varga * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside macros. svn path=/trunk/mono/; revision=136548 --- ChangeLog | 3 +++ ppc/ppc-codegen.h | 24 +++++++++++------------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/ChangeLog b/ChangeLog index f8bb22e..93543c2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,8 @@ 2009-06-20 Zoltan Varga + * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside + macros. + * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities. Handle little endian host platforms in ppc_emit32. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index c80cba9..cdd0c3b 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -761,12 +761,11 @@ my and Ximian's copyright to this code. ;) #ifdef __mono_ppc64__ #define ppc_load_sequence(c,D,v) G_STMT_START { \ - guint64 val = (guint64)(v); \ - ppc_lis ((c), (D), (val >> 48) & 0xffff); \ - ppc_ori ((c), (D), (D), (val >> 32) & 0xffff); \ + ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \ + ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \ ppc_sldi ((c), (D), (D), 32); \ - ppc_oris ((c), (D), (D), (val >> 16) & 0xffff); \ - ppc_ori ((c), (D), (D), val & 0xffff); \ + ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \ + ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \ } G_STMT_END #define PPC_LOAD_SEQUENCE_LENGTH 20 @@ -782,15 +781,14 @@ my and Ximian's copyright to this code. 
;) } G_STMT_END #define ppc_load(c,D,v) G_STMT_START { \ - guint64 val = (guint64)(v); \ - if (ppc_is_imm16 (val)) { \ - ppc_li ((c), (D), val); \ - } else if (ppc_is_imm32 (val)) { \ - ppc_load32 ((c), (D), val); \ - } else if (ppc_is_imm48 (val)) { \ - ppc_load48 ((c), (D), val); \ + if (ppc_is_imm16 ((guint64)(v))) { \ + ppc_li ((c), (D), (guint16)(guint64)(v)); \ + } else if (ppc_is_imm32 ((guint64)(v))) { \ + ppc_load32 ((c), (D), (guint32)(guint64)(v)); \ + } else if (ppc_is_imm48 ((guint64)(v))) { \ + ppc_load48 ((c), (D), (guint64)(v)); \ } else { \ - ppc_load_sequence ((c), (D), val); \ + ppc_load_sequence ((c), (D), (guint64)(v)); \ } \ } G_STMT_END -- cgit v1.1 From 40c668ecb1553ffb7b6575b439b3ff8420265cd8 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 22 Jun 2009 15:22:10 +0000 Subject: 2009-06-22 Zoltan Varga * ppc/ppc-codegen.h: Rework the naming of the load/store macros, ldr/str now handle register sized quantities, while ldptr/stptr handle pointer sized quantities. svn path=/trunk/mono/; revision=136604 --- ChangeLog | 6 ++++++ ppc/ppc-codegen.h | 38 +++++++++++++++++++++----------------- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/ChangeLog b/ChangeLog index 93543c2..c3698de 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +2009-06-22 Zoltan Varga + + * ppc/ppc-codegen.h: Rework the naming of the load/store macros, + ldr/str now handle register sized quantities, while ldptr/stptr handle + pointer sized quantities. + 2009-06-20 Zoltan Varga * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index cdd0c3b..8dc407c 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -137,31 +137,35 @@ enum { #if defined(__mono_ppc64__) && !defined(__mono_ilp32__) -#define ppc_load_reg(c,D,d,A) ppc_ld ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A)) -#define ppc_load_reg_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) -#define ppc_load_reg_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) +#define ppc_ldptr(c,D,d,A) ppc_ld ((c), (D), (d), (A)) +#define ppc_ldptr_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A)) +#define ppc_ldptr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B)) +#define ppc_ldptr_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B)) -#define ppc_store_reg(c,S,d,A) ppc_std ((c), (S), (d), (A)) -#define ppc_store_reg_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) -#define ppc_store_reg_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) -#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) +#define ppc_stptr(c,S,d,A) ppc_std ((c), (S), (d), (A)) +#define ppc_stptr_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A)) +#define ppc_stptr_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B)) +#define ppc_stptr_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B)) #else /* Same as ppc32 */ -#define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) -#define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) -#define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) +#define ppc_ldptr(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) +#define ppc_ldptr_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) +#define ppc_ldptr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) +#define ppc_ldptr_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) -#define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) -#define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), 
(d), (A)) -#define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) -#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) +#define ppc_stptr(c,S,d,A) ppc_stw ((c), (S), (d), (A)) +#define ppc_stptr_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) +#define ppc_stptr_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) +#define ppc_stptr_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) #endif +/* Macros to load pointer sized immediates */ +#define ppc_load_ptr(c,D,v) ppc_load ((c),(D),(gsize)(v)) +#define ppc_load_ptr_sequence(c,D,v) ppc_load_sequence ((c),(D),(gsize)(v)) + /* Macros to load/store regsize quantities */ #ifdef __mono_ppc64__ @@ -793,7 +797,7 @@ my and Ximian's copyright to this code. ;) } G_STMT_END #define ppc_load_func(c,D,v) G_STMT_START { \ - ppc_load_sequence ((c), ppc_r11, (guint64)(v)); \ + ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \ ppc_load_reg ((c), ppc_r2, 8, ppc_r11); \ ppc_load_reg ((c), (D), 0, ppc_r11); \ } G_STMT_END -- cgit v1.1 From 4ecc9d712b82d78c853e574edc0345c85bfcd660 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 22 Jun 2009 15:24:56 +0000 Subject: Fix a few uses of ppc_load_reg/ppc_store_reg. svn path=/trunk/mono/; revision=136606 --- ppc/ppc-codegen.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index 8dc407c..d255511 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -205,10 +205,6 @@ enum { #define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V)) -#define ppc_load_reg(c,D,d,A) ppc_lwz ((c), (D), (d), (A)) -#define ppc_load_reg_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A)) -#define ppc_load_reg_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B)) -#define ppc_load_reg_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B)) #define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A)) #define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) @@ -798,8 +794,8 @@ my and Ximian's copyright to this code. ;) #define ppc_load_func(c,D,v) G_STMT_START { \ ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \ - ppc_load_reg ((c), ppc_r2, 8, ppc_r11); \ - ppc_load_reg ((c), (D), 0, ppc_r11); \ + ppc_ldptr ((c), ppc_r2, 8, ppc_r11); \ + ppc_ldptr ((c), (D), 0, ppc_r11); \ } G_STMT_END #define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \ -- cgit v1.1 From f48a4f5a13745caf5350d6f190efb97ec6b605ef Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 22 Jun 2009 15:25:02 +0000 Subject: Fix a few uses of ppc_store_reg. svn path=/trunk/mono/; revision=136607 --- ppc/ppc-codegen.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index d255511..b1d1ea6 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -207,10 +207,6 @@ enum { #define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A)) -#define ppc_store_reg(c,S,d,A) ppc_stw ((c), (S), (d), (A)) -#define ppc_store_reg_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A)) -#define ppc_store_reg_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B)) -#define ppc_store_reg_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B)) #define ppc_store_multiple_regs(c,S,d,A) ppc_stmw ((c), (S), (d), (A)) #define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B)) -- cgit v1.1 From bb994071dcc42ba150d88776fe70f8d35fc522a9 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 23 Jun 2009 23:55:26 +0000 Subject: Fix LCONV_TO_xx and ICONV_TO_xx. Fix leave_method dump of returned structure. Fix formatting. Correct instruction lengths. 
Add new instructions.

svn path=/trunk/mono/; revision=136748
---
 s390x/ChangeLog | 4 ++++
 s390x/s390x-codegen.h | 8 ++++++++
 2 files changed, 12 insertions(+)

diff --git a/s390x/ChangeLog b/s390x/ChangeLog
index e53ab6e..d35967b 100644
--- a/s390x/ChangeLog
+++ b/s390x/ChangeLog
@@ -1,3 +1,7 @@
+2009-06-24 Neale Ferguson
+
+ * s390x-codegen.h: Add some new instructions.
+
 2007-04-12 Neale Ferguson
 * tramp.c: Add MONO_TYPE_PTR case.

diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h
index 6ae7058..6af46db 100644
--- a/s390x/s390x-codegen.h
+++ b/s390x/s390x-codegen.h
@@ -659,6 +659,7 @@ typedef struct {
 #define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d)
 #define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o)
 #define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d)
+#define s390_lbr(c, r1, r2) S390_RRE(c, 0xb926, r1, r2)
 #define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2)
 #define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2)
 #define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2)
@@ -672,6 +673,7 @@ typedef struct {
 #define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2)
 #define s390_ley(c, r, x, b, d) S390_RXY(c, 0xed64, r, x, b, d)
 #define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d)
+#define s390_lgbr(c, r1, r2) S390_RRE(c, 0xb906, r1, r2)
 #define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d)
 #define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d)
 #define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2)
@@ -679,13 +681,19 @@ typedef struct {
 #define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v)
 #define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2)
 #define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d)
+#define s390_lhr(c, r1, r2) S390_RRE(c, 0xb927, r1, r2)
 #define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d)
+#define s390_lghr(c, r1, r2) S390_RRE(c, 0xb907, r1, r2)
 #define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v)
 #define s390_lhy(c, r, x, b, d) S390_RXY(c, 0xe378, r, x, b, d)
+#define s390_llcr(c, r1, r2) S390_RRE(c, 0xb994, r1, r2)
 #define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d)
+#define s390_llgcr(c, r1, r2) S390_RRE(c, 0xb984, r1, r2)
 #define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d)
 #define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2)
 #define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d)
+#define s390_llghr(c, r1, r2) S390_RRE(c, 0xb985, r1, r2)
+#define s390_llhr(c, r1, r2) S390_RRE(c, 0xb995, r1, r2)
 #define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d)
 #define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d)
 #define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2)
-- cgit v1.1

From 1c634ebda21ddf5392c9d8edd030323d1ad85962 Mon Sep 17 00:00:00 2001
From: Jerri Maine
Date: Wed, 24 Jun 2009 21:19:29 +0000
Subject: mini-amd64.c: Added code to convert simd IR to native amd64 sse.
 amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.

svn path=/trunk/mono/; revision=136785
---
 ChangeLog | 7 ++++
 amd64/amd64-codegen.h | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index c3698de..d336219 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2009-06-26 Jerry Maine
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine .
+
+ * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
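For orientation, the amd64 hunk that follows builds every instruction macro on a small family of emit_sse_* helpers, each of which writes an optional REX byte plus a fixed opcode pattern. A minimal sketch of the intended output, assuming the AMD64_XMMn register constants defined earlier in amd64-codegen.h (illustrative, not part of any patch here):

guint8 buf [16];
guint8 *code = buf;
amd64_sse_addps_reg_reg (code, AMD64_XMM1, AMD64_XMM2);
/* buf now holds 0F 58 CA, i.e. addps %xmm2, %xmm1: the packed-single
   forms carry no mandatory prefix, and no REX byte is emitted while
   both registers stay below xmm8. */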
+ 2009-06-22 Zoltan Varga * ppc/ppc-codegen.h: Rework the naming of the load/store macros, diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 87b4dc4..1ddc8b0 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -504,6 +504,8 @@ typedef union { * SSE */ +//TODO Reorganize SSE opcode defines. + /* Two opcode SSE defines */ #define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ @@ -609,6 +611,106 @@ typedef union { #define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) + + +#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) + +#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e) + +#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) + +#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) + +#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) + +#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) + +#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) + +#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) + +#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) + +#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) + +#define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) + +#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) + +#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) + +#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) + +#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) + +#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) + +#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) + +#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) + +#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) + + +#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) + +#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) + +#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) + + +#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) + +#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) + +#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) + +#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) + +#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) + 
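The opcode bytes in these defines follow the standard SSE prefix scheme from the IA-32/AMD64 manuals; a compact summary, added here for reference:

/* packed single (xxxps): 0F op (emit_sse_reg_reg_op2)
 * packed double (xxxpd): 66 0F op (emit_sse_reg_reg with op1 = 0x66)
 * scalar single (xxxss): F3 0F op
 * scalar double (xxxsd): F2 0F op
 * SSE3 irregulars: addsubps/haddps/hsubps take F2 0F, and
 * movshdup/movsldup take F3 0F, despite being packed operations. */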
+#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+
+#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x66, 0x0f, 0x54)
+
+#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+
+#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+
+#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+
+#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+
+#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+
+#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+
+#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+
+#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+
+
+#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+
+
+#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+
+#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+
+#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+
+
+#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+
+#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+
+#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+
+#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+
+
+#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+
 /* Generated from x86-codegen.h */
 #define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
-- cgit v1.1

From d7fa5cedae9e4859b340ee29e997dfd48b45ce6e Mon Sep 17 00:00:00 2001
From: Jerri Maine
Date: Wed, 24 Jun 2009 21:25:11 +0000
Subject: Fix wrong date in my entry to ChangeLog files. Sorry! :((

svn path=/trunk/mono/; revision=136786
---
 ChangeLog | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ChangeLog b/ChangeLog
index d336219..edbe8c3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-2009-06-26 Jerry Maine
+2009-06-24 Jerry Maine
 Contributed under the terms of the MIT/X11 license by
 Jerry Maine .
-- cgit v1.1

From 64d366eddf3b1c93bcaaff2190fa1cc2b01f7f03 Mon Sep 17 00:00:00 2001
From: Jerri Maine
Date: Fri, 10 Jul 2009 22:35:07 +0000
Subject: Contributed under the terms of the MIT/X11 license by Jerry Maine .

* amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
* amd64/amd64-codegen.h: Fix bugs in simd macros.

svn path=/trunk/mono/; revision=137736
---
 ChangeLog | 8 ++
 amd64/amd64-codegen.h | 240 ++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 243 insertions(+), 5 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index edbe8c3..9170465 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2009-07-10 Jerry Maine
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine .
+
+ * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+ * amd64/amd64-codegen.h: Fix bugs in simd macros.
+
 2009-06-24 Jerry Maine
 Contributed under the terms of the MIT/X11 license by

diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index 1ddc8b0..adf2392 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -517,6 +517,11 @@ typedef union {
 #define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0)
+#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \
+ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
+ x86_imm_emit8 ((inst), (imm)); \
+} while (0)
+
 #define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \
 amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
 *(inst)++ = (unsigned char)(op1); \
@@ -549,6 +554,11 @@ typedef union {
 #define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
+#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
+ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
+ x86_imm_emit8 ((inst), (imm)); \
+} while (0)
+
 #define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
 *(inst)++ = (unsigned char)(op1); \
 amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
@@ -565,6 +575,19 @@ typedef union {
 amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
 } while (0)
+/* Four opcode SSE defines */
+
+#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
+ *(inst)++ = (unsigned char)(op1); \
+ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ *(inst)++ = (unsigned char)(op4); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+} while (0)
+
+#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
+
 /* specific SSE opcode defines */
 #define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
@@ -612,6 +635,13 @@ typedef union {
 #define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+
+#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+
+
+#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+
 #define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
@@ -621,9 +651,9 @@ typedef union {
 #define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
-#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
-#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
 #define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
@@ -659,22 +689,26 @@ typedef union {
 #define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) 
emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) +#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) + #define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) #define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) #define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) -#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) +#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) -#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) +#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) #define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) -#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x66, 0x0f, 0x54) +#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) #define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) +#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) + #define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) #define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) @@ -711,6 +745,202 @@ typedef union { #define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) +#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) + +#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) + +#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) + + +#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) + +#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) + +#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) + + +#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) + +#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) + +#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) + + +#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) + +#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) + + +#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) + +#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) + +#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3) + + +#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 
0x38, 0x38) + +#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) + +#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) + + +#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) + +#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) + +#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) + +#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) + + +#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) + +#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) + +#define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66) + +#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37) + + +#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6) + + +#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60) + +#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61) + +#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62) + +#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c) + +#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14) + +#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14) + + +#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68) + +#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69) + +#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a) + +#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d) + +#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15) + +#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15) + + +#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63) + +#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b) + +#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67) + +#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b) + + +#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc) + +#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) + +#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd) + +#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 
0xd8) + + +#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec) + +#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8) + +#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed) + +#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9) + + +#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5) + +#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40) + +#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4) + +#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4) + +#define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5) + + +#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1) + + +#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1) + + +#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1) + + +#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2) + + +#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2) + + +#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2) + + +#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3) + + +#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) + + +#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) + + +#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) + +#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) + +#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) + + 
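One encoding subtlety in the shift-by-immediate macros a few lines above: the 0F 71/72/73 opcodes have no register destination field of their own, so the ModRM reg bits carry an opcode extension instead, which is why X86_SSE_SHR, X86_SSE_SAR and X86_SSE_SHL are passed where a destination register would normally go. A sketch, assuming those constants are the /2, /4 and /6 digit values from x86-codegen.h:

guint8 buf [16];
guint8 *code = buf;
amd64_sse_psrlw_reg_imm (code, AMD64_XMM3, 5);
/* -> 66 0F 71 D3 05, i.e. psrlw $5, %xmm3: ModRM 0xD3 encodes
   mod=11, reg=/2 (the SHR extension), rm=xmm3. */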
+#define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+
+#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+
+
+#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+
+#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+
+#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+
+#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+
+#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+
+#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
+
+#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
+
 /* Generated from x86-codegen.h */
 #define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
-- cgit v1.1

From 88ccf5c589b23d6e79ea5a588d3986693b09879a Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Mon, 13 Jul 2009 21:58:58 +0000
Subject: 2009-07-13 Zoltan Varga

* x86/x86-codegen.h: Applied patch from Marian Salaj .
Fix encoding of PMINSW and PMINSD. Fixes #521662.

svn path=/trunk/mono/; revision=137821
---
 ChangeLog | 16 +++-------------
 x86/x86-codegen.h | 4 ++--
 2 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 9170465..5b7e5db 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,17 +1,7 @@
-2009-07-10 Jerry Maine
-
- Contributed under the terms of the MIT/X11 license by
- Jerry Maine .
-
- * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
- * amd64/amd64-codegen.h: Fix bugs in simd macros.
-
-2009-06-24 Jerry Maine
-
- Contributed under the terms of the MIT/X11 license by
- Jerry Maine .
+2009-07-13 Zoltan Varga
-
- * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+ * x86/x86-codegen.h: Applied patch from Marian Salaj .
+ Fix encoding of PMINSW and PMINSD. Fixes #521662.
 2009-06-22 Zoltan Varga

diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h
index 5bdb79a..ad8e13a 100644
--- a/x86/x86-codegen.h
+++ b/x86/x86-codegen.h
@@ -1756,8 +1756,8 @@ typedef enum {
 X86_SSE_PMAXUD = 0x3F, /*sse41*/
 X86_SSE_PMINSB = 0x38, /*sse41*/
- X86_SSE_PMINSW = 0x39,
- X86_SSE_PMINSD = 0xEA,/*sse41*/
+ X86_SSE_PMINSW = 0xEA,
+ X86_SSE_PMINSD = 0x39,/*sse41*/
 X86_SSE_PMINUB = 0xDA,
 X86_SSE_PMINUW = 0x3A, /*sse41*/
-- cgit v1.1

From f44bc9e40cc840bf63bf782aa0338aae3e898f7f Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Mon, 20 Jul 2009 20:45:49 +0000
Subject: 2009-07-20 Zoltan Varga

* amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
of this instruction.

svn path=/trunk/mono/; revision=138242
---
 ChangeLog | 5 +++++
 amd64/amd64-codegen.h | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/ChangeLog b/ChangeLog
index 5b7e5db..e2f66bf 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2009-07-20 Zoltan Varga
+
+ * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
+ of this instruction.
+
 2009-07-13 Zoltan Varga
 * x86/x86-codegen.h: Applied patch from Marian Salaj .
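The swap above matches the manuals: pminsw is an original SSE2 instruction in the two-byte opcode map, while pminsd exists only in SSE4.1's 0F 38 map, so the two table values had simply been crossed. For reference (encodings per the manuals, not text from the patch):

/* pminsw (SSE2): 66 0F EA /r, hence X86_SSE_PMINSW = 0xEA
 * pminsd (SSE4.1): 66 0F 38 39 /r, hence X86_SSE_PMINSD = 0x39 */

A companion amd64 encoding fix in the same instruction family follows.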
diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index adf2392..35a609a 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -775,7 +775,7 @@ typedef union {
 #define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
-#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3)
+#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
 #define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
-- cgit v1.1

From fc5d2d293fe800d860e9af4fcd9b19f9be7d4e17 Mon Sep 17 00:00:00 2001
From: Paolo Molaro
Date: Fri, 24 Jul 2009 15:00:25 +0000
Subject: Fri Jul 24 16:54:13 CEST 2009 Steven Munroe

This patch is contributed under the terms of the MIT/X11 license

* arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
conversion to support combining addis for bits 32-47 with
signed load/store displacements for bits 48-63.
(ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
These instructions are available to 32-bit programs on both
64-bit and 32-bit hardware starting with PowerISA V2.01.
[__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
native mode.
[!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
ppc32.

svn path=/trunk/mono/; revision=138635
---
 ChangeLog | 16 ++++++++++++++++
 ppc/ppc-codegen.h | 40 +++++++++++++++++++++++++++++-----------
 2 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index e2f66bf..b4adbbd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+
+Fri Jul 24 16:54:13 CEST 2009 Steven Munroe
+
+ This patch is contributed under the terms of the MIT/X11 license
+
+ * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
+ conversion to support combining addis for bits 32-47 with
+ signed load/store displacements for bits 48-63.
+ (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
+ These instructions are available to 32-bit programs on both
+ 64-bit and 32-bit hardware starting with PowerISA V2.01.
+ [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
+ native mode.
+ [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
+ ppc32.
+
 2009-07-20 Zoltan Varga
 * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding

diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h
index b1d1ea6..f98082c 100644
--- a/ppc/ppc-codegen.h
+++ b/ppc/ppc-codegen.h
@@ -127,6 +127,7 @@ enum {
 #define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1))
 #define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L)
+#define ppc_ha(val) (((val >> 16) + ((val & 0x8000) ? 1 : 0)) & 0xffff)
 #define ppc_load32(c,D,v) G_STMT_START { \
 ppc_lis ((c), (D), (guint32)(v) >> 16); \
@@ -754,6 +755,23 @@ my and Ximian's copyright to this code. ;)
 /* PPC64 */
+/* The following FP instructions are not available to 32-bit
+ implementations prior to PowerISA-V2.01, but are available to
+ 32-bit mode programs on 64-bit PowerPC implementations and all
+ processors compliant with PowerISA-2.01 or later. */
+
+#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc))
+#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0)
+#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1)
+
+#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc))
+#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0)
+#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1)
+
+#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc))
+#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0)
+#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1)
+
 #ifdef __mono_ppc64__
 #define ppc_load_sequence(c,D,v) G_STMT_START { \
@@ -840,17 +858,14 @@ my and Ximian's copyright to this code. ;)
 #define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0)
 #define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1)
-#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc))
-#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0)
-#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1)
-
-#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc))
-#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0)
-#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1)
-
-#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc))
-#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0)
-#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1)
+/* These move float to/from instructions are only available on POWER6 in
+ native mode. These instructions are faster than the equivalent
+ store/load because they avoid the store queue and associated delays.
+ These instructions should only be used in 64-bit mode unless the
+ kernel preserves the 64-bit GPR on signals and dispatch in 32-bit
+ mode. The Linux kernel does not. */
+#define ppc_mftgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (735 << 1) | 0)
+#define ppc_mffgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (607 << 1) | 0)
 #define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
 #define ppc_lwa(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((ds) & 0xfffc) | 2)
@@ -930,6 +945,9 @@ my and Ximian's copyright to this code. ;)
 #define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0)
 #define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0)
+#else
+/* Always true for 32-bit */
+#define ppc_is_imm32(val) (1)
 #endif
 #endif
-- cgit v1.1

From c4d98f3131b6b7d0732050c2e0ac7bd05b6c27c2 Mon Sep 17 00:00:00 2001
From: Jerri Maine
Date: Tue, 4 Aug 2009 00:31:14 +0000
Subject: Contributed under the terms of the MIT/X11 license by Jerry Maine .

* mono/arch/amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
* mono/mini/basic-simd.cs: added test for packed double square root.
* mono/mini/cpu-amd64.md: added opcode info for packed double square root.
* mono/mini/cpu-x86.md: added opcode info for packed double square root.
* mono/mini/mini-ops.h: added IR opcode for packed double square root.
* mono/mini/mini-x86.c: added IR to native translation code for packed double square root.
* mono/mini/mini-amd64.c: removed todo for packed double square root.
* mono/mini/simd-intrinsics.c: added method to IR opcode conversion for packed double square root.
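Returning to the ppc_ha() macro added by the PowerPC patch above: the 16-bit displacement of a load or store is sign-extended, so when bit 15 of a 32-bit offset is set, the high half materialized by addis must be bumped by one for the pair to add up, and ppc_ha() folds in exactly that adjustment. A sketch of the intended pairing, assuming the ppc_addis/ppc_lwz macros from this header (illustrative, not from the patch):

guint32 off = 0x12348000;
ppc_addis (c, ppc_r11, ppc_r30, ppc_ha (off)); /* r11 = r30 + 0x12350000 */
ppc_lwz (c, ppc_r3, off & 0xffff, ppc_r11); /* d = 0x8000 sign-extends to -0x8000 */
/* 0x12350000 - 0x8000 = 0x12348000, the offset we wanted. */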
svn path=/trunk/mono/; revision=139309
---
 ChangeLog | 6 ++++++
 amd64/amd64-codegen.h | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index b4adbbd..00ad690 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2009-07-03 Jerry Maine
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine .
+
+ * amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
 Fri Jul 24 16:54:13 CEST 2009 Steven Munroe

diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index 35a609a..c7ae42a 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -634,6 +634,8 @@ typedef union {
 #define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+
 #define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
-- cgit v1.1

From 568b4a7ab726e87c664a682193fa57c5521ed23c Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Fri, 14 Aug 2009 13:49:01 +0000
Subject: 2009-08-14 Zoltan Varga

* arm/arm-codegen.h: Add armv6 MOVW/MOVT.

svn path=/trunk/mono/; revision=139918
---
 ChangeLog | 4 ++++
 arm/arm-codegen.h | 7 +++++++
 2 files changed, 11 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 00ad690..cc9f0ec 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2009-08-14 Zoltan Varga
+
+ * arm/arm-codegen.h: Add armv6 MOVW/MOVT.
+
 2009-07-03 Jerry Maine
 Contributed under the terms of the MIT/X11 license by

diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h
index 5808890..1503318 100644
--- a/arm/arm-codegen.h
+++ b/arm/arm-codegen.h
@@ -1076,6 +1076,13 @@ typedef union {
 arminstr_t raw;
 } ARMInstr;
+/* ARMv6t2 */
+
+#define ARM_MOVW_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (0 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVW_REG_IMM(p, rd, imm16) ARM_MOVW_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+#define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
 #ifdef __cplusplus
 }
-- cgit v1.1

From 774d55350115d1c4f08dc2a9b015e9502d796cef Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Tue, 10 Nov 2009 00:58:49 +0000
Subject: 2009-11-10 Zoltan Varga

* arm/arm-codegen.h: Fix the names of the LDMIA/STMIA macros, they don't actually update the base register.
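The rename being described comes down to the writeback (W) bit, the last flag handed to ARM_DEF_MRT before the condition in the hunk below: ARM_LDM/ARM_STM pass 0, so the base register keeps its value, while ARM_PUSH passes 1 (the ! in stmdb sp!). A sketch of the contrast, assuming the usual register-mask convention of these macros:

ARM_STM (p, ARMREG_R0, (1 << ARMREG_R4) | (1 << ARMREG_R5)); /* stmia r0, {r4, r5}; r0 unchanged */
ARM_PUSH (p, (1 << ARMREG_R4) | (1 << ARMREG_R5)); /* stmdb sp!, {r4, r5}; sp updated */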
svn path=/trunk/mono/; revision=145786
---
 arm/arm-codegen.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h
index 1503318..e7dc99f 100644
--- a/arm/arm-codegen.h
+++ b/arm/arm-codegen.h
@@ -449,8 +449,8 @@ typedef struct {
 ARM_DEF_COND(cond)
-#define ARM_LDMIA(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
-#define ARM_STMIA(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_LDM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
 /* stmdb sp!, {regs} */
 #define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
-- cgit v1.1

From 282ce11cd7691698334563b95ca4b49e6c32f900 Mon Sep 17 00:00:00 2001
From: Gonzalo Paniagua Javier
Date: Fri, 20 Nov 2009 22:34:30 +0000
Subject: removing PLATFORM_WIN32

svn path=/trunk/mono/; revision=146652
---
 amd64/amd64-codegen.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index c7ae42a..2cef670 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -67,7 +67,7 @@ typedef enum
 AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
 } AMD64_REX_Bits;
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
 #define AMD64_ARG_REG1 AMD64_RCX
 #define AMD64_ARG_REG2 AMD64_RDX
 #define AMD64_ARG_REG3 AMD64_R8
@@ -79,7 +79,7 @@ typedef enum
 #define AMD64_ARG_REG4 AMD64_RCX
 #endif
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
 #define AMD64_CALLEE_REGS ((1<
-- cgit v1.1

From: Neale Ferguson
Date: Tue, 23 Mar 2010 20:00:46 +0000
Subject: Primarily, add support for mono_arch_get_throw_corlib_exception and IMT for s390x. Other s390x fixes to instruction sizes, parameter passing, and ARCH settings.

svn path=/trunk/mono/; revision=154085
---
 s390x/ChangeLog | 4 ++++
 s390x/s390x-codegen.h | 1 -
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/s390x/ChangeLog b/s390x/ChangeLog
index d35967b..e756d35 100644
--- a/s390x/ChangeLog
+++ b/s390x/ChangeLog
@@ -1,3 +1,7 @@
+2010-03-23 Neale Ferguson
+
+ * s390x-codegen.h: Remove duplicate
+
 2009-06-24 Neale Ferguson
 * s390x-codegen.h: Add some new instructions.

diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h
index 6af46db..7a2e069 100644
--- a/s390x/s390x-codegen.h
+++ b/s390x/s390x-codegen.h
@@ -773,7 +773,6 @@ typedef struct {
 #define s390_stey(c, r, x, b, d) S390_RXY(c, 0xed66, r, x, b, d)
 #define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d)
 #define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d)
-#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d)
 #define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d)
 #define s390_sthy(c, r, x, b, d) S390_RXY(c, 0xe370, r, x, b, d)
 #define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d)
-- cgit v1.1

From 2b562993a3dced62eb48aeedcf38f234b655e86f Mon Sep 17 00:00:00 2001
From: Zoltan Varga
Date: Mon, 29 Mar 2010 23:21:23 +0000
Subject: 2010-03-30 Zoltan Varga

* arm/*.sh: Remove bash dependency.

svn path=/trunk/mono/; revision=154407
---
 ChangeLog | 4 ++++
 arm/dpiops.sh | 4 ++--
 arm/fpaops.sh | 4 ++--
 arm/vfpops.sh | 4 ++--
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index cc9f0ec..9116df8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2010-03-30 Zoltan Varga
+
+ * arm/*.sh: Remove bash dependency.
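As for the armv6 MOVW/MOVT macros introduced a few patches back: MOVW writes a 16-bit immediate into the low halfword of rd and zeroes the top, MOVT overwrites only the top halfword, so any 32-bit constant loads in two instructions with no literal pool. A sketch, assuming the ARMREG_* names from this header:

guint32 val = 0xdeadbeef;
ARM_MOVW_REG_IMM (p, ARMREG_R0, val & 0xffff); /* r0 = 0x0000beef */
ARM_MOVT_REG_IMM (p, ARMREG_R0, (val >> 16) & 0xffff); /* r0 = 0xdeadbeef */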
+ 2009-08-14 Zoltan Varga * arm/arm-codegen.h: Add armv6 MOVW/MOVT. diff --git a/arm/dpiops.sh b/arm/dpiops.sh index 1802eec..d3b93ff 100755 --- a/arm/dpiops.sh +++ b/arm/dpiops.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC" CMP_OPCODES="TST TEQ CMP CMN" @@ -6,7 +6,7 @@ MOV_OPCODES="MOV MVN" # $1: opcode list # $2: template -function gen() { +gen() { for i in $1; do sed "s//$i/g" $2.th done diff --git a/arm/fpaops.sh b/arm/fpaops.sh index fa6a280..be19876 100755 --- a/arm/fpaops.sh +++ b/arm/fpaops.sh @@ -1,11 +1,11 @@ -#!/bin/bash +#!/bin/sh DYADIC="ADF MUF SUF RSF DVF RDF POW RPW RMF FML FDV FRD POL" MONADIC="MVF MNF ABS RND SQT LOG EXP SIN COS TAN ASN ACS ATN URD NRM" # $1: opcode list # $2: template -function gen() { +gen() { for i in $1; do sed "s//$i/g" $2.th done diff --git a/arm/vfpops.sh b/arm/vfpops.sh index 4f850f0..bed4a9c 100755 --- a/arm/vfpops.sh +++ b/arm/vfpops.sh @@ -1,11 +1,11 @@ -#!/bin/bash +#!/bin/sh DYADIC="ADD SUB MUL NMUL DIV" MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ" # $1: opcode list # $2: template -function gen() { +gen() { for i in $1; do sed "s//$i/g" $2.th done -- cgit v1.1 From bb66b04f8ca017660ae65afa4b86a33b32d48cdb Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 8 Apr 2010 04:41:44 +0000 Subject: .gitignore svn path=/trunk/mono/; revision=155025 --- arm/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arm/.gitignore b/arm/.gitignore index 0b0e716..978145d 100644 --- a/arm/.gitignore +++ b/arm/.gitignore @@ -10,4 +10,6 @@ /*.exe /*.dll /arm_dpimacros.h +/arm_fpamacros.h +/arm_vfpmacros.h /fixeol.sh -- cgit v1.1 From 80806328ee52ed52783e005f044e8447d34efac5 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 19 May 2010 02:35:46 +0000 Subject: 2010-05-19 Zoltan Varga * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support. svn path=/trunk/mono/; revision=157521 --- ChangeLog | 4 ++++ ppc/ppc-codegen.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 9116df8..231f796 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,7 @@ +2010-05-19 Zoltan Varga + + * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support. + 2010-03-30 Zoltan Varga * arm/*.sh: Remove bash dependency. diff --git a/ppc/ppc-codegen.h b/ppc/ppc-codegen.h index f98082c..55b5060 100644 --- a/ppc/ppc-codegen.h +++ b/ppc/ppc-codegen.h @@ -808,7 +808,7 @@ my and Ximian's copyright to this code. ;) #define ppc_load_func(c,D,v) G_STMT_START { \ ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \ - ppc_ldptr ((c), ppc_r2, 8, ppc_r11); \ + ppc_ldptr ((c), ppc_r2, sizeof (gpointer), ppc_r11); \ ppc_ldptr ((c), (D), 0, ppc_r11); \ } G_STMT_END -- cgit v1.1 From da52cebbb28392e8043a36e8c29f4ceb4f706741 Mon Sep 17 00:00:00 2001 From: Raja R Harinath Date: Sun, 25 Jul 2010 20:09:25 +0530 Subject: EOL handling This set of .gitattributes was automatically generated from the list of files that GIT tried to normalize when I enabled automatic EOL conversion. With this set of attributes, we prevent automated EOL conversion on files that we know will cause trouble down the road. 
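A note on the ppc_load_func change above: on ppc64 with the ELFv1 ABI, a function symbol resolves to a descriptor of pointer-sized slots rather than straight to code, which is why the macro loads the callee's TOC into r2 and the entry address into D. A sketch of the layout being dereferenced (field names are illustrative, not taken from the mono sources):

typedef struct {
 gpointer code; /* entry point, loaded into D from offset 0 */
 gpointer toc; /* TOC base, loaded into r2 from offset sizeof (gpointer) */
 gpointer env; /* environment pointer, unused here */
} PPCFunctionDescriptor;

Spelling the TOC offset as sizeof (gpointer) rather than a hard-coded 8 is what keeps it correct under __mono_ilp32__, where pointers are 4 bytes.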
--- arm/.gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 arm/.gitattributes diff --git a/arm/.gitattributes b/arm/.gitattributes new file mode 100644 index 0000000..4819db1 --- /dev/null +++ b/arm/.gitattributes @@ -0,0 +1 @@ +/arm-wmmx.h -crlf -- cgit v1.1 From 881a8fe8dfebf42e0f50228319132001d121c983 Mon Sep 17 00:00:00 2001 From: Elijah Taylor Date: Mon, 9 Aug 2010 17:40:18 +0200 Subject: Add hooks to the codegen macros to support NACL codegen. --- x86/x86-codegen.h | 450 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 429 insertions(+), 21 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ad8e13a..af3e3c6 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -15,6 +15,31 @@ #ifndef X86_H #define X86_H #include + +#ifdef __native_client_codegen__ +#define kNaClAlignment 32 +#define kNaClAlignmentMask (kNaClAlignment - 1) +extern guint8 nacl_align_byte; +#endif /* __native_client_codegen__ */ + + +#if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) +#define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0) +#define x86_call_sequence_pre(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post(inst) \ + (mono_nacl_align_call(&_code_start, &(inst)), _code_start); +#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post_val(inst) \ + (mono_nacl_align_call(&_code_start, &(inst)), _code_start); +#else +#define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0) +#define x86_call_sequence_pre(inst) +#define x86_call_sequence_post(inst) +#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post_val(inst) _code_start +#endif /* __native_client_codegen__ */ + + /* // x86 register numbers */ @@ -278,6 +303,8 @@ typedef union { #define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0) #define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0) +#define kMaxMembaseEmitPadding 6 + #define x86_membase_emit(inst,r,basereg,disp) do {\ if ((basereg) == X86_ESP) { \ if ((disp) == 0) { \ @@ -307,6 +334,8 @@ typedef union { } \ } while (0) +#define kMaxMemindexEmitPadding 6 + #define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ do { \ if ((basereg) == X86_NOBASEREG) { \ @@ -343,7 +372,7 @@ typedef union { * the instruction is inspected for validity and the correct displacement * is inserted. 
*/ -#define x86_patch(ins,target) \ +#define x86_do_patch(ins,target) \ do { \ unsigned char* pos = (ins) + 1; \ int disp, size = 0; \ @@ -367,10 +396,73 @@ typedef union { else assert (0); \ } while (0) +#if defined( __native_client_codegen__ ) && defined(TARGET_X86) + +#define x86_skip_nops(inst) \ + do { \ + int in_nop = 0; \ + do { \ + in_nop = 0; \ + if (inst[0] == 0x90) { \ + in_nop = 1; \ + inst += 1; \ + } \ + if (inst[0] == 0x8b && inst[1] == 0xc0) { \ + in_nop = 1; \ + inst += 2; \ + } \ + if (inst[0] == 0x8d && inst[1] == 0x6d \ + && inst[2] == 0x00) { \ + in_nop = 1; \ + inst += 3; \ + } \ + if (inst[0] == 0x8d && inst[1] == 0x64 \ + && inst[2] == 0x24 && inst[3] == 0x00) { \ + in_nop = 1; \ + inst += 4; \ + } \ + /* skip inst+=5 case because it's the 4-byte + 1-byte case */ \ + if (inst[0] == 0x8d && inst[1] == 0xad \ + && inst[2] == 0x00 && inst[3] == 0x00 \ + && inst[4] == 0x00 && inst[5] == 0x00) { \ + in_nop = 1; \ + inst += 6; \ + } \ + if (inst[0] == 0x8d && inst[1] == 0xa4 \ + && inst[2] == 0x24 && inst[3] == 0x00 \ + && inst[4] == 0x00 && inst[5] == 0x00 \ + && inst[6] == 0x00 ) { \ + in_nop = 1; \ + inst += 7; \ + } \ + } while ( in_nop ); \ + } while (0) + +#define x86_patch(ins,target) \ + do { \ + unsigned char* inst = (ins); \ + x86_skip_nops((inst)); \ + x86_do_patch((inst), (target)); \ + } while (0) + +#else +#define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0) +#endif /* __native_client_codegen__ */ + +#ifdef __native_client_codegen__ +/* The breakpoint instruction is illegal in Native Client, although the HALT */ +/* instruction is allowed. The breakpoint is used several places in mini-x86.c */ +/* and exceptions-x86.c. */ +#define x86_breakpoint(inst) \ + do { \ + *(inst)++ = 0xf4; \ + } while (0) +#else #define x86_breakpoint(inst) \ do { \ *(inst)++ = 0xcc; \ } while (0) +#endif #define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0) #define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0) @@ -380,7 +472,15 @@ typedef union { #define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) #define x86_movsd(inst) x86_movsl((inst)) -#define x86_prefix(inst,p) do { *(inst)++ =(unsigned char) (p); } while (0) +/* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */ +/* This keeps us from having to call x86_codegen_pre with specific */ +/* knowledge of the size of the instruction that follows it, and */ +/* localizes the alignment requirement to this spot. 
*/ +#define x86_prefix(inst,p) \ + do { \ + x86_codegen_pre(&(inst), kNaClAlignment - 1); \ + *(inst)++ =(unsigned char) (p); \ + } while (0) #define x86_rdtsc(inst) \ do { \ @@ -390,6 +490,7 @@ typedef union { #define x86_cmpxchg_reg_reg(inst,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0xb1; \ x86_reg_emit ((inst), (reg), (dreg)); \ @@ -397,6 +498,7 @@ typedef union { #define x86_cmpxchg_mem_reg(inst,mem,reg) \ do { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0xb1; \ x86_mem_emit ((inst), (reg), (mem)); \ @@ -404,6 +506,7 @@ typedef union { #define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0xb1; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ @@ -411,6 +514,7 @@ typedef union { #define x86_xchg_reg_reg(inst,dreg,reg,size) \ do { \ + x86_codegen_pre(&(inst), 2); \ if ((size) == 1) \ *(inst)++ = (unsigned char)0x86; \ else \ @@ -420,6 +524,7 @@ typedef union { #define x86_xchg_mem_reg(inst,mem,reg,size) \ do { \ + x86_codegen_pre(&(inst), 6); \ if ((size) == 1) \ *(inst)++ = (unsigned char)0x86; \ else \ @@ -429,6 +534,7 @@ typedef union { #define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ if ((size) == 1) \ *(inst)++ = (unsigned char)0x86; \ else \ @@ -438,6 +544,7 @@ typedef union { #define x86_xadd_reg_reg(inst,dreg,reg,size) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0x0F; \ if ((size) == 1) \ *(inst)++ = (unsigned char)0xC0; \ @@ -448,6 +555,7 @@ typedef union { #define x86_xadd_mem_reg(inst,mem,reg,size) \ do { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0x0F; \ if ((size) == 1) \ *(inst)++ = (unsigned char)0xC0; \ @@ -458,6 +566,7 @@ typedef union { #define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0F; \ if ((size) == 1) \ *(inst)++ = (unsigned char)0xC0; \ @@ -468,12 +577,14 @@ typedef union { #define x86_inc_mem(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xff; \ x86_mem_emit ((inst), 0, (mem)); \ } while (0) #define x86_inc_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ } while (0) @@ -482,12 +593,14 @@ typedef union { #define x86_dec_mem(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xff; \ x86_mem_emit ((inst), 1, (mem)); \ } while (0) #define x86_dec_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 1, (basereg), (disp)); \ } while (0) @@ -496,36 +609,42 @@ typedef union { #define x86_not_mem(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xf7; \ x86_mem_emit ((inst), 2, (mem)); \ } while (0) #define x86_not_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xf7; \ x86_membase_emit ((inst), 2, (basereg), (disp)); \ } while (0) #define x86_not_reg(inst,reg) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xf7; \ x86_reg_emit ((inst), 2, (reg)); \ } while (0) #define 
x86_neg_mem(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xf7; \ x86_mem_emit ((inst), 3, (mem)); \ } while (0) #define x86_neg_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xf7; \ x86_membase_emit ((inst), 3, (basereg), (disp)); \ } while (0) #define x86_neg_reg(inst,reg) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xf7; \ x86_reg_emit ((inst), 3, (reg)); \ } while (0) @@ -535,15 +654,18 @@ typedef union { #define x86_alu_reg_imm(inst,opc,reg,imm) \ do { \ if ((reg) == X86_EAX) { \ + x86_codegen_pre(&(inst), 5); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ x86_imm_emit32 ((inst), (imm)); \ break; \ } \ if (x86_is_imm8((imm))) { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0x81; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -553,10 +675,12 @@ typedef union { #define x86_alu_mem_imm(inst,opc,mem,imm) \ do { \ if (x86_is_imm8((imm))) { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0x83; \ x86_mem_emit ((inst), (opc), (mem)); \ x86_imm_emit8 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 10); \ *(inst)++ = (unsigned char)0x81; \ x86_mem_emit ((inst), (opc), (mem)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -566,10 +690,12 @@ typedef union { #define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \ do { \ if (x86_is_imm8((imm))) { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x83; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ x86_imm_emit8 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x81; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -578,6 +704,7 @@ typedef union { #define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x80; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ x86_imm_emit8 ((inst), (imm)); \ @@ -585,18 +712,21 @@ typedef union { #define x86_alu_mem_reg(inst,opc,mem,reg) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ x86_mem_emit ((inst), (reg), (mem)); \ } while (0) #define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define x86_alu_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) @@ -612,24 +742,28 @@ typedef union { */ #define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \ x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \ } while (0) #define x86_alu_reg_mem(inst,opc,reg,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_mem_emit ((inst), (reg), (mem)); \ } while (0) #define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_membase_emit ((inst), (reg), 
(basereg), (disp)); \ } while (0) #define x86_test_reg_imm(inst,reg,imm) \ do { \ + x86_codegen_pre(&(inst), 6); \ if ((reg) == X86_EAX) { \ *(inst)++ = (unsigned char)0xa9; \ } else { \ @@ -641,6 +775,7 @@ typedef union { #define x86_test_mem_imm(inst,mem,imm) \ do { \ + x86_codegen_pre(&(inst), 10); \ *(inst)++ = (unsigned char)0xf7; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -648,6 +783,7 @@ typedef union { #define x86_test_membase_imm(inst,basereg,disp,imm) \ do { \ + x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xf7; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -655,18 +791,21 @@ typedef union { #define x86_test_reg_reg(inst,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0x85; \ x86_reg_emit ((inst), (reg), (dreg)); \ } while (0) #define x86_test_mem_reg(inst,mem,reg) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0x85; \ x86_mem_emit ((inst), (reg), (mem)); \ } while (0) #define x86_test_membase_reg(inst,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x85; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) @@ -674,9 +813,11 @@ typedef union { #define x86_shift_reg_imm(inst,opc,reg,imm) \ do { \ if ((imm) == 1) { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd1; \ x86_reg_emit ((inst), (opc), (reg)); \ } else { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0xc1; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ @@ -686,9 +827,11 @@ typedef union { #define x86_shift_mem_imm(inst,opc,mem,imm) \ do { \ if ((imm) == 1) { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xd1; \ x86_mem_emit ((inst), (opc), (mem)); \ } else { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0xc1; \ x86_mem_emit ((inst), (opc), (mem)); \ x86_imm_emit8 ((inst), (imm)); \ @@ -698,9 +841,11 @@ typedef union { #define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \ do { \ if ((imm) == 1) { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xd1; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ } else { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0xc1; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ x86_imm_emit8 ((inst), (imm)); \ @@ -709,18 +854,21 @@ typedef union { #define x86_shift_reg(inst,opc,reg) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd3; \ x86_reg_emit ((inst), (opc), (reg)); \ } while (0) #define x86_shift_mem(inst,opc,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xd3; \ x86_mem_emit ((inst), (opc), (mem)); \ } while (0) #define x86_shift_membase(inst,opc,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xd3; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ } while (0) @@ -731,6 +879,7 @@ typedef union { #define x86_shrd_reg(inst,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0xad; \ x86_reg_emit ((inst), (reg), (dreg)); \ @@ -738,6 +887,7 @@ typedef union { #define x86_shrd_reg_imm(inst,dreg,reg,shamt) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0xac; \ x86_reg_emit ((inst), (reg), (dreg)); \ @@ -746,6 +896,7 @@ typedef union { #define x86_shld_reg(inst,dreg,reg) \ do { \ + 
x86_codegen_pre(&(inst), 3); \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0xa5; \
 x86_reg_emit ((inst), (reg), (dreg)); \
@@ -753,6 +904,7 @@ typedef union {
 #define x86_shld_reg_imm(inst,dreg,reg,shamt) \
 do { \
+ x86_codegen_pre(&(inst), 4); \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0xa4; \
 x86_reg_emit ((inst), (reg), (dreg)); \
@@ -764,18 +916,21 @@ typedef union {
 */
 #define x86_mul_reg(inst,reg,is_signed) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xf7; \
 x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \
 } while (0)
 #define x86_mul_mem(inst,mem,is_signed) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0xf7; \
 x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \
 } while (0)
 #define x86_mul_membase(inst,basereg,disp,is_signed) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0xf7; \
 x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
 } while (0)
@@ -785,6 +940,7 @@ typedef union {
 */
 #define x86_imul_reg_reg(inst,dreg,reg) \
 do { \
+ x86_codegen_pre(&(inst), 3); \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0xaf; \
 x86_reg_emit ((inst), (dreg), (reg)); \
@@ -792,6 +948,7 @@ typedef union {
 #define x86_imul_reg_mem(inst,reg,mem) \
 do { \
+ x86_codegen_pre(&(inst), 7); \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0xaf; \
 x86_mem_emit ((inst), (reg), (mem)); \
@@ -799,6 +956,7 @@ typedef union {
 #define x86_imul_reg_membase(inst,reg,basereg,disp) \
 do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0xaf; \
 x86_membase_emit ((inst), (reg), (basereg), (disp)); \
@@ -810,10 +968,12 @@ typedef union {
 #define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \
 do { \
 if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
 *(inst)++ = (unsigned char)0x6b; \
 x86_reg_emit ((inst), (dreg), (reg)); \
 x86_imm_emit8 ((inst), (imm)); \
 } else { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0x69; \
 x86_reg_emit ((inst), (dreg), (reg)); \
 x86_imm_emit32 ((inst), (imm)); \
@@ -823,10 +983,12 @@ typedef union {
 #define x86_imul_reg_mem_imm(inst,reg,mem,imm) \
 do { \
 if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
 *(inst)++ = (unsigned char)0x6b; \
 x86_mem_emit ((inst), (reg), (mem)); \
 x86_imm_emit8 ((inst), (imm)); \
 } else { \
+ x86_codegen_pre(&(inst), 10); \
 *(inst)++ = (unsigned char)0x69; \
 x86_mem_emit ((inst), (reg), (mem)); \
 x86_imm_emit32 ((inst), (imm)); \
@@ -836,10 +998,12 @@ typedef union {
 #define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \
 do { \
 if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0x6b; \
 x86_membase_emit ((inst), (reg), (basereg), (disp)); \
 x86_imm_emit8 ((inst), (imm)); \
 } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0x69; \
 x86_membase_emit ((inst), (reg), (basereg), (disp)); \
 x86_imm_emit32 ((inst), (imm)); \
@@ -853,24 +1017,28 @@ typedef union {
 #define x86_div_reg(inst,reg,is_signed) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xf7; \
 x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \
 } while (0)
 #define x86_div_mem(inst,mem,is_signed) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0xf7; \
 x86_mem_emit ((inst), 6 + ((is_signed) ?
1 : 0), (mem)); \ } while (0) #define x86_div_membase(inst,basereg,disp,is_signed) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xf7; \ x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \ } while (0) #define x86_mov_mem_reg(inst,mem,reg,size) \ do { \ + x86_codegen_pre(&(inst), 7); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -882,6 +1050,7 @@ typedef union { #define x86_mov_regp_reg(inst,regp,reg,size) \ do { \ + x86_codegen_pre(&(inst), 3); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -893,6 +1062,7 @@ typedef union { #define x86_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -904,6 +1074,7 @@ typedef union { #define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -915,6 +1086,7 @@ typedef union { #define x86_mov_reg_reg(inst,dreg,reg,size) \ do { \ + x86_codegen_pre(&(inst), 3); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -926,6 +1098,7 @@ typedef union { #define x86_mov_reg_mem(inst,reg,mem,size) \ do { \ + x86_codegen_pre(&(inst), 7); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -935,8 +1108,11 @@ typedef union { x86_mem_emit ((inst), (reg), (mem)); \ } while (0) +#define kMovRegMembasePadding (2 + kMaxMembaseEmitPadding) + #define x86_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ + x86_codegen_pre(&(inst), kMovRegMembasePadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -948,6 +1124,7 @@ typedef union { #define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ @@ -964,6 +1141,7 @@ typedef union { #define x86_mov_reg_imm(inst,reg,imm) \ do { \ + x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)0xb8 + (reg); \ x86_imm_emit32 ((inst), (imm)); \ } while (0) @@ -971,15 +1149,18 @@ typedef union { #define x86_mov_mem_imm(inst,mem,imm,size) \ do { \ if ((size) == 1) { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0xc6; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ + x86_codegen_pre(&(inst), 9); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0xc7; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 10); \ *(inst)++ = (unsigned char)0xc7; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -989,15 +1170,18 @@ typedef union { #define x86_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ if ((size) == 1) { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ 
*(inst)++ = (unsigned char)0xc6; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ + x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -1007,15 +1191,18 @@ typedef union { #define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \ do { \ if ((size) == 1) { \ + x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ *(inst)++ = (unsigned char)0xc6; \ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ + x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0xc7; \ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ x86_imm_emit16 ((inst), (imm)); \ } else { \ + x86_codegen_pre(&(inst), 5 + kMaxMemindexEmitPadding); \ *(inst)++ = (unsigned char)0xc7; \ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ x86_imm_emit32 ((inst), (imm)); \ @@ -1024,18 +1211,21 @@ typedef union { #define x86_lea_mem(inst,reg,mem) \ do { \ + x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)0x8d; \ x86_mem_emit ((inst), (reg), (mem)); \ } while (0) #define x86_lea_membase(inst,reg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x8d; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \ *(inst)++ = (unsigned char)0x8d; \ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ } while (0) @@ -1044,6 +1234,7 @@ typedef union { do { \ unsigned char op = 0xb6; \ g_assert (is_half || X86_IS_BYTE_REG (reg)); \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ @@ -1054,6 +1245,7 @@ typedef union { #define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \ do { \ unsigned char op = 0xb6; \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ @@ -1064,6 +1256,7 @@ typedef union { #define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \ do { \ unsigned char op = 0xb6; \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ @@ -1074,6 +1267,7 @@ typedef union { #define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \ do { \ unsigned char op = 0xb6; \ + x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) op += 0x08; \ if ((is_half)) op += 0x01; \ @@ -1086,18 +1280,21 @@ typedef union { #define x86_fp_op_mem(inst,opc,mem,is_double) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \ x86_mem_emit ((inst), (opc), (mem)); \ } while (0) #define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (is_double) ? 
(unsigned char)0xdc : (unsigned char)0xd8; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ } while (0) #define x86_fp_op(inst,opc,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd8; \ *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \ } while (0) @@ -1105,6 +1302,7 @@ typedef union { #define x86_fp_op_reg(inst,opc,index,pop_stack) \ do { \ static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \ *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \ } while (0) @@ -1118,126 +1316,147 @@ typedef union { */ #define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \ x86_membase_emit ((inst), opc, (basereg), (disp)); \ } while (0) #define x86_fstp(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdd; \ *(inst)++ = (unsigned char)0xd8+(index); \ } while (0) #define x86_fcompp(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xde; \ *(inst)++ = (unsigned char)0xd9; \ } while (0) #define x86_fucompp(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xda; \ *(inst)++ = (unsigned char)0xe9; \ } while (0) #define x86_fnstsw(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdf; \ *(inst)++ = (unsigned char)0xe0; \ } while (0) #define x86_fnstcw(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xd9; \ x86_mem_emit ((inst), 7, (mem)); \ } while (0) #define x86_fnstcw_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xd9; \ x86_membase_emit ((inst), 7, (basereg), (disp)); \ } while (0) #define x86_fldcw(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xd9; \ x86_mem_emit ((inst), 5, (mem)); \ } while (0) #define x86_fldcw_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xd9; \ x86_membase_emit ((inst), 5, (basereg), (disp)); \ } while (0) #define x86_fchs(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xe0; \ } while (0) #define x86_frem(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xf8; \ } while (0) #define x86_fxch(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \ } while (0) #define x86_fcomi(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdb; \ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ } while (0) #define x86_fcomip(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdf; \ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \ } while (0) #define x86_fucomi(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdb; \ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ } while (0) #define x86_fucomip(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xdf; \ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \ } while (0) #define x86_fld(inst,mem,is_double) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ x86_mem_emit ((inst), 0, (mem)); \ } while (0) #define x86_fld_membase(inst,basereg,disp,is_double) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ } while (0) #define x86_fld80_mem(inst,mem) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xdb; \ x86_mem_emit ((inst), 5, (mem)); \ } while (0) #define x86_fld80_membase(inst,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xdb; \ x86_membase_emit ((inst), 5, (basereg), (disp)); \ } while (0) #define x86_fild(inst,mem,is_long) \ do { \ + x86_codegen_pre(&(inst), 6); \ if ((is_long)) { \ *(inst)++ = (unsigned char)0xdf; \ x86_mem_emit ((inst), 5, (mem)); \ @@ -1249,6 +1468,7 @@ typedef union { #define x86_fild_membase(inst,basereg,disp,is_long) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ if ((is_long)) { \ *(inst)++ = (unsigned char)0xdf; \ x86_membase_emit ((inst), 5, (basereg), (disp)); \ @@ -1260,42 +1480,49 @@ typedef union { #define x86_fld_reg(inst,index) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \ } while (0) #define x86_fldz(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xee; \ } while (0) #define x86_fld1(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xe8; \ } while (0) #define x86_fldpi(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xd9; \ *(inst)++ = (unsigned char)0xeb; \ } while (0) #define x86_fst(inst,mem,is_double,pop_stack) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \ } while (0) #define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \ do { \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \ x86_membase_emit ((inst), 2 + ((pop_stack) ? 
1 : 0), (basereg), (disp)); \
 } while (0)
 #define x86_fst80_mem(inst,mem) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0xdb; \
 x86_mem_emit ((inst), 7, (mem)); \
 } while (0)
@@ -1303,6 +1530,7 @@ typedef union {
 #define x86_fst80_membase(inst,basereg,disp) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0xdb; \
 x86_membase_emit ((inst), 7, (basereg), (disp)); \
 } while (0)
@@ -1310,6 +1538,7 @@ typedef union {
 #define x86_fist_pop(inst,mem,is_long) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 if ((is_long)) { \
 *(inst)++ = (unsigned char)0xdf; \
 x86_mem_emit ((inst), 7, (mem)); \
@@ -1321,6 +1550,7 @@ typedef union {
 #define x86_fist_pop_membase(inst,basereg,disp,is_long) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 if ((is_long)) { \
 *(inst)++ = (unsigned char)0xdf; \
 x86_membase_emit ((inst), 7, (basereg), (disp)); \
@@ -1332,6 +1562,7 @@ typedef union {
 #define x86_fstsw(inst) \
 do { \
+ x86_codegen_pre(&(inst), 3); \
 *(inst)++ = (unsigned char)0x9b; \
 *(inst)++ = (unsigned char)0xdf; \
 *(inst)++ = (unsigned char)0xe0; \
@@ -1345,6 +1576,7 @@ typedef union {
 */
 #define x86_fist_membase(inst,basereg,disp,is_int) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 if ((is_int)) { \
 *(inst)++ = (unsigned char)0xdb; \
 x86_membase_emit ((inst), 2, (basereg), (disp)); \
@@ -1362,24 +1594,28 @@ typedef union {
 #define x86_push_regp(inst,reg) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xff; \
 x86_regp_emit ((inst), 6, (reg)); \
 } while (0)
 #define x86_push_mem(inst,mem) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0xff; \
 x86_mem_emit ((inst), 6, (mem)); \
 } while (0)
 #define x86_push_membase(inst,basereg,disp) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0xff; \
 x86_membase_emit ((inst), 6, (basereg), (disp)); \
 } while (0)
 #define x86_push_memindex(inst,basereg,disp,indexreg,shift) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
 *(inst)++ = (unsigned char)0xff; \
 x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \
 } while (0)
@@ -1390,9 +1626,11 @@ typedef union {
 do { \
 int _imm = (int) (imm); \
 if (x86_is_imm8 (_imm)) { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0x6A; \
 x86_imm_emit8 ((inst), (_imm)); \
 } else { \
+ x86_codegen_pre(&(inst), 5); \
 *(inst)++ = (unsigned char)0x68; \
 x86_imm_emit32 ((inst), (_imm)); \
 } \
@@ -1405,12 +1643,14 @@ typedef union {
 #define x86_pop_mem(inst,mem) \
 do { \
+ x86_codegen_pre(&(inst), 6); \
 *(inst)++ = (unsigned char)0x8f; \
 x86_mem_emit ((inst), 0, (mem)); \
 } while (0)
 #define x86_pop_membase(inst,basereg,disp) \
 do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
 *(inst)++ = (unsigned char)0x8f; \
 x86_membase_emit ((inst), 0, (basereg), (disp)); \
 } while (0)
@@ -1422,34 +1662,70 @@ typedef union {
 #define x86_loop(inst,imm) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xe2; \
 x86_imm_emit8 ((inst), (imm)); \
 } while (0)
 #define x86_loope(inst,imm) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xe1; \
 x86_imm_emit8 ((inst), (imm)); \
 } while (0)
 #define x86_loopne(inst,imm) \
 do { \
+ x86_codegen_pre(&(inst), 2); \
 *(inst)++ = (unsigned char)0xe0; \
 x86_imm_emit8 ((inst), (imm)); \
 } while (0)
 #define x86_jump32(inst,imm) \
 do { \
+ x86_codegen_pre(&(inst), 5); \
 *(inst)++ = (unsigned char)0xe9; \
 x86_imm_emit32 ((inst), (imm)); \
 }
while (0) #define x86_jump8(inst,imm) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = (unsigned char)0xeb; \ x86_imm_emit8 ((inst), (imm)); \ } while (0) + +#ifdef __native_client_codegen__ +#define x86_jump_reg(inst,reg) do { \ + x86_codegen_pre(&(inst), 5); \ + *(inst)++ = (unsigned char)0x83; /* and */ \ + x86_reg_emit ((inst), 4, (reg)); /* reg */ \ + *(inst)++ = (unsigned char)nacl_align_byte; \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst), 4, (reg)); \ + } while (0) + +/* Let's hope ECX is available for these... */ +#define x86_jump_mem(inst,mem) do { \ + x86_mov_reg_mem(inst, (X86_ECX), (mem), 4); \ + x86_jump_reg(inst, (X86_ECX)); \ + } while (0) + +#define x86_jump_membase(inst,basereg,disp) do { \ + x86_mov_reg_membase(inst, (X86_ECX), basereg, disp, 4); \ + x86_jump_reg(inst, (X86_ECX)); \ + } while (0) + +/* like x86_jump_membase, but force a 32-bit displacement */ +#define x86_jump_membase32(inst,basereg,disp) do { \ + x86_codegen_pre(&(inst), 6); \ + *(inst)++ = (unsigned char)0x8b; \ + x86_address_byte ((inst), 2, X86_ECX, (basereg)); \ + x86_imm_emit32 ((inst), (disp)); \ + x86_jump_reg(inst, (X86_ECX)); \ + } while (0) +#else /* __native_client_codegen__ */ #define x86_jump_reg(inst,reg) \ do { \ *(inst)++ = (unsigned char)0xff; \ @@ -1467,17 +1743,20 @@ typedef union { *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 4, (basereg), (disp)); \ } while (0) - +#endif /* __native_client_codegen__ */ /* * target is a pointer in our buffer. */ #define x86_jump_code(inst,target) \ do { \ - int t = (unsigned char*)(target) - (inst) - 2; \ + int t; \ + x86_codegen_pre(&(inst), 2); \ + t = (unsigned char*)(target) - (inst) - 2; \ if (x86_is_imm8(t)) { \ x86_jump8 ((inst), t); \ } else { \ - t -= 3; \ + x86_codegen_pre(&(inst), 5); \ + t = (unsigned char*)(target) - (inst) - 5; \ x86_jump32 ((inst), t); \ } \ } while (0) @@ -1495,6 +1774,7 @@ typedef union { #define x86_branch8(inst,cond,imm,is_signed) \ do { \ + x86_codegen_pre(&(inst), 2); \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)]; \ else \ @@ -1504,6 +1784,7 @@ typedef union { #define x86_branch32(inst,cond,imm,is_signed) \ do { \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ @@ -1514,11 +1795,14 @@ typedef union { #define x86_branch(inst,cond,target,is_signed) \ do { \ - int offset = (target) - (inst) - 2; \ + int offset; \ + x86_codegen_pre(&(inst), 2); \ + offset = (target) - (inst) - 2; \ if (x86_is_imm8 ((offset))) \ x86_branch8 ((inst), (cond), offset, (is_signed)); \ else { \ - offset -= 4; \ + x86_codegen_pre(&(inst), 6); \ + offset = (target) - (inst) - 6; \ x86_branch32 ((inst), (cond), offset, (is_signed)); \ } \ } while (0) @@ -1537,6 +1821,7 @@ typedef union { #define x86_set_reg(inst,cond,reg,is_signed) \ do { \ g_assert (X86_IS_BYTE_REG (reg)); \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ @@ -1547,6 +1832,7 @@ typedef union { #define x86_set_mem(inst,cond,mem,is_signed) \ do { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ @@ -1557,6 +1843,7 @@ typedef union { #define x86_set_membase(inst,cond,basereg,disp,is_signed) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \ @@ -1565,12 
+1852,48 @@ typedef union {
 x86_membase_emit ((inst), 0, (basereg), (disp)); \
 } while (0)
-#define x86_call_imm(inst,disp) \
+#define x86_call_imm_body(inst,disp) \
 do { \
 *(inst)++ = (unsigned char)0xe8; \
 x86_imm_emit32 ((inst), (int)(disp)); \
 } while (0)
+#define x86_call_imm(inst,disp) \
+ do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_imm_body((inst), (disp)); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+#ifdef __native_client_codegen__
+#define x86_call_reg_internal(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* call */ \
+ x86_reg_emit ((inst), 2, (reg)); /* reg */ \
+ } while (0)
+
+#define x86_call_reg(inst, reg) do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_reg_internal(inst, reg); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+/* It appears that x86_call_mem() is never used, so I'm leaving it out. */
+#define x86_call_membase(inst,basereg,disp) do { \
+ x86_call_sequence_pre((inst)); \
+ /* x86_mov_reg_membase() inlined so it's a fixed size */ \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, (X86_ECX), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_call_reg_internal(inst, X86_ECX); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+#else /* __native_client_codegen__ */
 #define x86_call_reg(inst,reg) \
 do { \
 *(inst)++ = (unsigned char)0xff; \
@@ -1588,12 +1911,57 @@ typedef union {
 *(inst)++ = (unsigned char)0xff; \
 x86_membase_emit ((inst), 2, (basereg), (disp)); \
 } while (0)
+#endif /* __native_client_codegen__ */
+
+
+#ifdef __native_client_codegen__
 #define x86_call_code(inst,target) \
 do { \
- int _x86_offset = (unsigned char*)(target) - (inst); \
+ int _x86_offset; \
+ guint8* _aligned_start; \
+ x86_call_sequence_pre_val ((inst)); \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ _aligned_start = x86_call_sequence_post_val ((inst)); \
+ _x86_offset = (unsigned char*)(target) - (_aligned_start); \
 _x86_offset -= 5; \
- x86_call_imm ((inst), _x86_offset); \
+ x86_call_imm_body ((_aligned_start), _x86_offset); \
+ } while (0)
+
+#define SIZE_OF_RET 6
+#define x86_ret(inst) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ } while (0)
+
+/* pop return address */
+/* pop imm bytes from stack */
+/* return */
+#define x86_ret_imm(inst,imm) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_alu_reg_imm ((inst), X86_ADD, X86_ESP, imm); \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+} while (0)
+#else /* __native_client_codegen__ */
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
 } while (0)
 #define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
@@ -1603,13 +1971,16 @@ typedef union {
 if ((imm) ==
0) { \ x86_ret ((inst)); \ } else { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0xc2; \ x86_imm_emit16 ((inst), (imm)); \ } \ } while (0) +#endif /* __native_client_codegen__ */ #define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char) 0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ @@ -1620,6 +1991,7 @@ typedef union { #define x86_cmov_mem(inst,cond,is_signed,reg,mem) \ do { \ + x86_codegen_pre(&(inst), 7); \ *(inst)++ = (unsigned char) 0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ @@ -1630,6 +2002,7 @@ typedef union { #define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char) 0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \ @@ -1640,6 +2013,7 @@ typedef union { #define x86_enter(inst,framesize) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0xc8; \ x86_imm_emit16 ((inst), (framesize)); \ *(inst)++ = 0; \ @@ -1648,17 +2022,17 @@ typedef union { #define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) #define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0) -#define x86_fsin(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) -#define x86_fcos(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) -#define x86_fabs(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) -#define x86_ftst(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0) -#define x86_fxam(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0) -#define x86_fpatan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) -#define x86_fprem(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) -#define x86_fprem1(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0) -#define x86_frndint(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0) -#define x86_fsqrt(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0) -#define x86_fptan(inst) do { *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0) +#define x86_fsin(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0) +#define x86_fcos(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0) +#define x86_fabs(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0) +#define x86_ftst(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0) +#define x86_fxam(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0) +#define x86_fpatan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0) +#define x86_fprem(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0) +#define x86_fprem1(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned 
char)0xf5; } while (0) +#define x86_frndint(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0) +#define x86_fsqrt(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0) +#define x86_fptan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0) #define x86_padding(inst,size) \ do { \ @@ -1686,6 +2060,14 @@ typedef union { } \ } while (0) +#ifdef __native_client_codegen__ + +#define kNaClLengthOfCallReg 5 +#define kNaClLengthOfCallImm 5 +#define kNaClLengthOfCallMembase (kNaClLengthOfCallReg + 6) + +#endif /* __native_client_codegen__ */ + #define x86_prolog(inst,frame_size,reg_mask) \ do { \ unsigned i, m = 1; \ @@ -1853,6 +2235,7 @@ typedef enum { /* minimal SSE* support */ #define x86_movsd_reg_membase(inst,dreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xf2; \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ @@ -1861,6 +2244,7 @@ typedef enum { #define x86_cvttsd2si(inst,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0xf2; \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x2c; \ @@ -1869,6 +2253,7 @@ typedef enum { #define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0F; \ *(inst)++ = (unsigned char)(opc); \ x86_reg_emit ((inst), (dreg), (reg)); \ @@ -1876,6 +2261,7 @@ typedef enum { #define x86_sse_alu_reg_membase(inst,opc,sreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)(opc); \ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ @@ -1883,6 +2269,7 @@ typedef enum { #define x86_sse_alu_membase_reg(inst,opc,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0F; \ *(inst)++ = (unsigned char)(opc); \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ @@ -1891,30 +2278,35 @@ typedef enum { #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0x66; \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) #define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x66; \ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ } while (0) #define x86_sse_alu_pd_reg_membase(inst,opc,dreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x66; \ x86_sse_alu_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \ } while (0) #define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \ do { \ + x86_codegen_pre(&(inst), 5); \ x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \ *(inst)++ = (unsigned char)(imm); \ } while (0) #define x86_sse_alu_pd_reg_membase_imm(inst,opc,dreg,basereg,disp,imm) \ do { \ + x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \ x86_sse_alu_pd_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \ *(inst)++ = (unsigned char)(imm); \ } while (0) @@ -1927,6 +2319,7 @@ typedef enum { #define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \ do { \ + x86_codegen_pre(&(inst), 4); \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ *(inst)++ = (unsigned char)imm; \ } 
while (0) @@ -1934,12 +2327,14 @@ typedef enum { #define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0xF2; \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) #define x86_sse_alu_sd_membase_reg(inst,opc,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xF2; \ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ } while (0) @@ -1947,12 +2342,14 @@ typedef enum { #define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0xF3; \ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \ } while (0) #define x86_sse_alu_ss_membase_reg(inst,opc,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xF3; \ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \ } while (0) @@ -1961,6 +2358,7 @@ typedef enum { #define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \ do { \ + x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0x0F; \ *(inst)++ = (unsigned char)0x38; \ @@ -1970,6 +2368,7 @@ typedef enum { #define x86_movups_reg_membase(inst,sreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x10; \ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ @@ -1977,6 +2376,7 @@ typedef enum { #define x86_movups_membase_reg(inst,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x11; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ @@ -1984,6 +2384,7 @@ typedef enum { #define x86_movaps_reg_membase(inst,sreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x28; \ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \ @@ -1991,6 +2392,7 @@ typedef enum { #define x86_movaps_membase_reg(inst,basereg,disp,reg) \ do { \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x29; \ x86_membase_emit ((inst), (reg), (basereg), (disp)); \ @@ -1998,6 +2400,7 @@ typedef enum { #define x86_movaps_reg_reg(inst,dreg,sreg) \ do { \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x28; \ x86_reg_emit ((inst), (dreg), (sreg)); \ @@ -2006,6 +2409,7 @@ typedef enum { #define x86_movd_reg_xreg(inst,dreg,sreg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x7e; \ @@ -2014,6 +2418,7 @@ typedef enum { #define x86_movd_xreg_reg(inst,dreg,sreg) \ do { \ + x86_codegen_pre(&(inst), 4); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x6e; \ @@ -2022,6 +2427,7 @@ typedef enum { #define x86_movd_xreg_membase(inst,sreg,basereg,disp) \ do { \ + x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0x66; \ *(inst)++ = (unsigned char)0x0f; \ *(inst)++ = (unsigned char)0x6e; \ @@ -2030,6 +2436,7 @@ typedef enum { #define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \ do { \ + x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)(high_words) ? 
0xF3 : 0xF2; \
 *(inst)++ = (unsigned char)0x0f; \
 *(inst)++ = (unsigned char)0x70; \
@@ -2039,6 +2446,7 @@ typedef enum {
 #define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \
 do { \
+ x86_codegen_pre(&(inst), 5); \
 x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \
 x86_imm_emit8 ((inst), (imm)); \
 } while (0)
-- cgit v1.1

From 7981b77489eba9fafe98b764ae8c423143e55a25 Mon Sep 17 00:00:00 2001
From: Mark Mason
Date: Wed, 18 Aug 2010 23:39:36 +0800
Subject: Simplify test for MIPS imm16 operands.

Code contributed under the MIT/X11 license.
---
 mips/mips-codegen.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h
index e05bc38..20ad367 100644
--- a/mips/mips-codegen.h
+++ b/mips/mips-codegen.h
@@ -193,7 +193,7 @@ enum {
 #define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func)))
 #define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun)))
-#define mips_is_imm16(val) ((gint)(val) >= (gint)-(1<<15) && (gint)(val) <= (gint)((1<<15)-1))
+#define mips_is_imm16(val) ((gint)(gshort)(gint)(val) == (gint)(val))
 /* Load always using lui/addiu pair (for later patching) */
 #define mips_load(c,D,v) do { \
-- cgit v1.1

From aa974c33a3cee416fc456053164835acbf81df70 Mon Sep 17 00:00:00 2001
From: Rodrigo Kumpera
Date: Fri, 24 Sep 2010 11:28:46 -0300
Subject: Implement amd64 support for OP_CARDTABLE.

* amd64-codegen.h (amd64_alu_reg_membase_size): Add support
for RIP based addressing.

* cpu-amd64.md: Add card_table_wbarrier.

* mini-amd64.c (mono_arch_output_basic_block): Emit the new OP.

* mini-amd64.c (mono_arch_emit_exceptions): Handle another kind
of patch-info - GC_CARD_TABLE_ADDR. This is required because we can
have neither 64-bit immediates on amd64 nor 2 scratch registers with
the current regalloc.

* mini-amd64.h: Define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER.
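A minimal usage sketch of the RIP-based form this commit enables (illustrative only: the buffer setup, the OR/RSI operand choice, and the zero placeholder displacement are assumptions, not code from the commit, and it presumes amd64_membase_emit accepts AMD64_RIP as a base):

    unsigned char buf [32];
    unsigned char *code = buf;
    /* With basereg == AMD64_RIP the displacement is encoded RIP-relative,
     * so a zero disp can be emitted now and the 4 bytes that end the
     * instruction patched once the card table address is known. */
    amd64_alu_reg_membase_size (code, X86_OR, AMD64_RSI, AMD64_RIP, 0, 8);
    /* the disp32 to patch sits at code - 4 */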
---
 amd64/amd64-codegen.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index 2cef670..7ca557d 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -188,6 +188,14 @@ typedef union {
 #define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+ do { \
+ amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ amd64_membase_emit (inst, reg, basereg, disp); \
+} while (0)
+
+
 #define amd64_mov_regp_reg(inst,regp,reg,size) \
 do { \
 if ((size) == 2) \
@@ -983,7 +991,7 @@ typedef union {
 //#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); } while (0)
 #define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); } while (0)
 #define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); } while (0)
-#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); } while (0)
+//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); } while (0)
 #define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); } while (0)
 #define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); } while (0)
 #define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); } while (0)
-- cgit v1.1

From cfdf246cd2ffd65bd25e09f1d66bb55d57bf8953 Mon Sep 17 00:00:00 2001
From: Elijah Taylor
Date: Tue, 14 Dec 2010 14:37:36 -0800
Subject: Changes to mono/arch/amd64 for Native Client

---
 amd64/amd64-codegen.h | 964 +++++++++++++++++++++++++++++++++++++-------------
 amd64/tramp.c | 22 +-
 2 files changed, 728 insertions(+), 258 deletions(-)

diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h
index 7ca557d..8684a5c 100644
--- a/amd64/amd64-codegen.h
+++ b/amd64/amd64-codegen.h
@@ -67,6 +67,32 @@ typedef enum
 AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
 } AMD64_REX_Bits;
+#if defined(__default_codegen__)
+
+#define amd64_codegen_pre(inst)
+#define amd64_codegen_post(inst)
+
+#elif defined(__native_client_codegen__)
+
+#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
+#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size. */
+/* These pre- and post-sequence hooks remedy this by aligning the call */
+/* sequence after we emit it, since we will know the exact size then. */
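+/* To make the hook mechanism concrete: under __native_client_codegen__, one
+ * emitter bracketed by this pair expands roughly as sketched below (schematic
+ * only; amd64_nacl_instruction_pre/post are assumed to come from the NaCl
+ * runtime support code, not from this header):
+ *
+ *   guint8* _codegen_start = (inst);   // amd64_codegen_pre: note the start
+ *   amd64_nacl_instruction_pre ();
+ *   ...the emitter writes the instruction bytes, advancing (inst)...
+ *   amd64_nacl_instruction_post (&_codegen_start, &(inst));
+ *   // the emitted range may have been realigned; _codegen_start tracks it
+ */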
+#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define amd64_call_sequence_post(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define amd64_is_valid_nacl_base(reg) \
+ ((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
+ (reg) == AMD64_RBP || (reg) == AMD64_RSP)
+
+#endif /*__native_client_codegen__*/
+
 #ifdef TARGET_WIN32
 #define AMD64_ARG_REG1 AMD64_RCX
 #define AMD64_ARG_REG2 AMD64_RDX
@@ -88,6 +114,16 @@ typedef enum
 #define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
+#if defined(__default_codegen__)
 #define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
 { \
 unsigned char _amd64_rex_bits = \
 (((width) > 4) ? AMD64_REX_W : 0) | \
 (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
 (((reg_index) > 7) ? AMD64_REX_X : 0) | \
 (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
 if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
 } while (0)
+#elif defined(__native_client_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _amd64_rex_bits = \
+ (((width) > 4) ? AMD64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+ (((reg_index) > 7) ? AMD64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+ amd64_nacl_tag_rex((inst)); \
+ if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+ } while (0)
+#endif
 typedef union {
- gsize val;
+ guint64 val;
 unsigned char b [8];
 } amd64_imm_buf;
@@ -138,7 +187,7 @@ typedef union {
 #define x86_imm_emit64(inst,imm) \
 do { \
 amd64_imm_buf imb; \
- imb.val = (gsize) (imm); \
+ imb.val = (guint64) (imm); \
 *(inst)++ = imb.b [0]; \
 *(inst)++ = imb.b [1]; \
 *(inst)++ = imb.b [2]; \
@@ -158,7 +207,7 @@ typedef union {
 x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \
 } while (0)
-#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \
 do { \
 if (x86_is_imm8((imm))) { \
 amd64_emit_rex(inst, size, 0, 0, (reg)); \
@@ -177,29 +226,67 @@ typedef union {
 } \
 } while (0)
-#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
-
-#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \
 do { \
 amd64_emit_rex(inst, size, (dreg), 0, (reg)); \
 *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
 x86_reg_emit ((inst), (dreg), (reg)); \
 } while (0)
-#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+#if defined(__default_codegen__)
+
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
-#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size))
+
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not directly update RSP or RBP other than direct copies */
+/* between them.
Instead the lower 4 bytes are updated and then added to R15 */ +#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP)) + +#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ + do{ \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg(reg)) { \ + if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \ + g_assert_not_reached(); \ + amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ + /* Use LEA instead of ADD to preserve flags */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while(0) + +#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ do { \ - amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - amd64_membase_emit (inst, reg, basereg, disp); \ -} while (0) + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \ + if (((opc) != X86_ADD && (opc) != X86_SUB)) \ + g_assert_not_reached(); \ + amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ + /* Use LEA instead of ADD to preserve flags */ \ + amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while (0) +#endif /*__native_client_codegen__*/ + +#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) + +#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) #define amd64_mov_regp_reg(inst,regp,reg,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ @@ -207,12 +294,14 @@ typedef union { default: assert (0); \ } \ x86_regp_emit ((inst), (reg), (regp)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ @@ -220,27 +309,31 @@ typedef union { default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_mov_mem_reg(inst,mem,reg,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_reg(inst,dreg,reg,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned 
char)0x8a; break; \ @@ -248,27 +341,43 @@ typedef union { default: assert (0); \ } \ x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ } while (0) -#define amd64_mov_reg_mem(inst,reg,mem,size) \ +#define amd64_mov_reg_mem_body(inst,reg,mem,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ + amd64_codegen_post(inst); \ } while (0) -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ +#if defined(__default_codegen__) +#define amd64_mov_reg_mem(inst,reg,mem,size) \ + do { \ + amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ + } while (0) +#elif defined(__native_client_codegen__) +/* We have to re-base memory reads because memory isn't zero based. */ +#define amd64_mov_reg_mem(inst,reg,mem,size) \ + do { \ + amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \ + } while (0) +#endif /* __native_client_codegen__ */ + +#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ do { \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ @@ -278,8 +387,56 @@ typedef union { amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ + x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); \ + } while (0) + +#if defined(__default_codegen__) + +#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) +#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + } while (0) + +#elif defined(__native_client_codegen__) + +#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((reg))) { \ + /* Clear upper 32 bits with mov of size 4 */ \ + amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ + /* Add %r15 using LEA to preserve flags */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while(0) + +#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((reg))) { \ + /* Clear upper 32 bits with mov of size 4 */ \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ + /* Add %r15 */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while (0) + +#endif /*__native_client_codegen__*/ + #define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ do { \ + amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ @@ -288,27 +445,34 @@ typedef union { default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_mem(inst,reg,mem) \ do { \ - amd64_emit_rex(inst,8,(reg),0,0); \ - *(inst)++ = (unsigned char)0x63; \ - x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(reg),0,0); \ + *(inst)++ = (unsigned char)0x63; \ + x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ - amd64_emit_rex(inst,8,(reg),0,(basereg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(reg),0,(basereg)); \ + *(inst)++ = (unsigned char)0x63; \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_movsxd_reg_reg(inst,dreg,reg) \ do { \ - amd64_emit_rex(inst,8,(dreg),0,(reg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(dreg),0,(reg)); \ + *(inst)++ = (unsigned char)0x63; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ } while (0) /* Pretty much the only instruction that supports a 64-bit immediate. 
Optimize for common case of @@ -316,18 +480,22 @@ typedef union { */ #define amd64_mov_reg_imm_size(inst,reg,imm,size) \ do { \ + amd64_codegen_pre(inst); \ amd64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ if ((size) == 8) \ - x86_imm_emit64 ((inst), (gsize)(imm)); \ + x86_imm_emit64 ((inst), (guint64)(imm)); \ else \ - x86_imm_emit32 ((inst), (int)(gsize)(imm)); \ + x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_mov_reg_imm(inst,reg,imm) \ do { \ - int _amd64_width_temp = ((gsize)(imm) == (gsize)(int)(gsize)(imm)); \ - amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \ + int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \ + amd64_codegen_pre(inst); \ + amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \ + amd64_codegen_post(inst); \ } while (0) #define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) @@ -336,8 +504,9 @@ typedef union { #define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ + amd64_codegen_pre(inst); \ if ((size) == 2) \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ @@ -352,36 +521,69 @@ typedef union { x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit32 ((inst), (imm)); \ } \ + amd64_codegen_post(inst); \ } while (0) -#define amd64_lea_membase(inst,reg,basereg,disp) \ + +#define amd64_lea_membase_body(inst,reg,basereg,disp) \ do { \ amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#if defined(__default_codegen__) +#define amd64_lea_membase(inst,reg,basereg,disp) \ + amd64_lea_membase_body((inst), (reg), (basereg), (disp)) +#elif defined(__native_client_codegen__) +/* NaCl modules may not write directly into RSP/RBP. Instead, use a */ +/* 32-bit LEA and add R15 to the effective address */ +#define amd64_lea_membase(inst,reg,basereg,disp) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg(reg)) { \ + /* 32-bit LEA */ \ + amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)0x8d; \ + amd64_membase_emit((inst), (reg), (basereg), (disp)); \ + /* Use a 64-bit LEA instead of an ADD to preserve flags */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \ + } \ + amd64_codegen_post(inst); \ + } while (0) +#endif /*__native_client_codegen__*/ + /* Instruction are implicitly 64-bits so don't generate REX for just the size. */ #define amd64_push_reg(inst,reg) \ do { \ + amd64_codegen_pre(inst); \ amd64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \ + amd64_codegen_post(inst); \ } while (0) /* Instruction is implicitly 64-bits so don't generate REX for just the size. 
 
 /* The instruction is implicitly 64-bit so don't generate REX for just the size. */
 #define amd64_push_membase(inst,basereg,disp) \
 	do { \
+	amd64_codegen_pre(inst); \
 	amd64_emit_rex(inst, 0, 0, 0, (basereg)); \
 	*(inst)++ = (unsigned char)0xff; \
 	x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
-#define amd64_pop_reg(inst,reg) \
+#define amd64_pop_reg_body(inst,reg) \
 	do { \
+	amd64_codegen_pre(inst); \
 	amd64_emit_rex(inst, 0, 0, 0, (reg)); \
 	*(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
+#if defined(__default_codegen__)
+
 #define amd64_call_reg(inst,reg)	\
 	do { \
 	amd64_emit_rex(inst, 0, 0, 0, (reg)); \
@@ -389,94 +591,203 @@ typedef union {
 	x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
 	} while (0)
 
+
 #define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
 #define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg))
+
+#elif defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define amd64_jump_reg_size(inst,reg,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex ((inst),0,0,0,(reg)); \
+		x86_jump_reg((inst),((reg)&0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define amd64_jump_mem_size(inst,mem,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \
+		amd64_jump_reg_size((inst), AMD64_R11, 4); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg_internal(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex((inst), 0, 0, 0, (reg)); \
+		x86_call_reg((inst), ((reg) & 0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre(inst); \
+		amd64_call_reg_internal((inst), (reg)); \
+		amd64_call_sequence_post(inst); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+
+#define amd64_ret(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_jump_reg_size((inst), AMD64_R11, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_leave(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_pop_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((reg))) { \
+			amd64_pop_reg_body((inst), AMD64_R11); \
+			amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
+			amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		} else { \
+			amd64_pop_reg_body((inst), (reg)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
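A hedged sketch of what the Native Client definitions above amount to, assuming nacl_align_byte holds the bundle-alignment mask: indirect targets are masked down to a bundle boundary and rebased through %r15, and ret is rewritten as a pop plus masked jump because a raw ret would go through an unchecked return address:

	unsigned char buf [64], *p = buf;
	amd64_call_reg (p, AMD64_RDX); /* roughly: and edx, nacl_align_byte; add rdx, r15; call rdx,
	                                  wrapped in amd64_call_sequence_pre/post bundle bookkeeping */
	amd64_ret (p);                 /* roughly: pop r11; and r11d, nacl_align_byte; add r11, r15; jmp r11 */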
 #define amd64_movsd_reg_regp(inst,reg,regp)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf2; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf2); \
 	amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x10; \
 	x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movsd_regp_reg(inst,regp,reg)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf2; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf2); \
 	amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x11; \
 	x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movss_reg_regp(inst,reg,regp)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf3; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf3); \
 	amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x10; \
 	x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movss_regp_reg(inst,regp,reg)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf3; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf3); \
 	amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x11; \
 	x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movsd_reg_membase(inst,reg,basereg,disp)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf2; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf2); \
 	amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x10; \
 	x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movss_reg_membase(inst,reg,basereg,disp)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf3; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf3); \
 	amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x10; \
 	x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movsd_membase_reg(inst,basereg,disp,reg)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf2; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf2); \
 	amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x11; \
 	x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 #define amd64_movss_membase_reg(inst,basereg,disp,reg)	\
 	do { \
-	*(inst)++ = (unsigned char)0xf3; \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), 0xf3); \
 	amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
 	*(inst)++ = (unsigned char)0x0f; \
 	*(inst)++ = (unsigned char)0x11; \
 	x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+	amd64_codegen_post(inst); \
 	} while (0)
 
 /* The original inc_reg opcode is used as the REX prefix */
 #define amd64_inc_reg_size(inst,reg,size) \
-	do { \
-	amd64_emit_rex ((inst),(size),0,0,(reg)); \
-	*(inst)++ = (unsigned char)0xff; \
-	x86_reg_emit ((inst),0,(reg) & 0x7); \
-	} while (0)
+	do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst),(size),0,0,(reg)); \
+	*(inst)++ = (unsigned char)0xff; \
+	x86_reg_emit ((inst),0,(reg) & 0x7); \
+	amd64_codegen_post(inst); \
+	} while (0)
 
 #define amd64_dec_reg_size(inst,reg,size) \
-	do { \
-	amd64_emit_rex ((inst),(size),0,0,(reg)); \
-	*(inst)++ = (unsigned char)0xff; \
-	x86_reg_emit ((inst),1,(reg) & 0x7); \
-	} while (0)
+	do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst),(size),0,0,(reg)); \
+	*(inst)++ = (unsigned char)0xff; \
+	x86_reg_emit ((inst),1,(reg) & 0x7); \
+	amd64_codegen_post(inst); \
+	} while (0)
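The comment above points at why these two macros look the way they do: the one-byte i386 encodings 0x40+reg (inc) and 0x48+reg (dec) were repurposed as the REX prefixes in 64-bit mode, so amd64 has to fall back to the two-byte ff /0 and ff /1 forms. For illustration, a sketch assuming this header is included:

	unsigned char buf [16], *p = buf;
	amd64_inc_reg_size (p, AMD64_RAX, 8); /* emits 48 ff c0 (inc %rax), not the old 0x40 */
	amd64_dec_reg_size (p, AMD64_RCX, 4); /* emits ff c9 (dec %ecx), not the old 0x49 */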
+
+#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst),0,0,0,(basereg)); \
+	*(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+	amd64_membase_emit ((inst), 0, (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#if defined (__default_codegen__)
 
 /* From the AMD64 Software Optimization Manual */
 #define amd64_padding_size(inst,size) \
@@ -489,12 +800,6 @@ typedef union {
 	}; \
 	} while (0)
 
-#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
-	amd64_emit_rex ((inst),0,0,0,(basereg)); \
-	*(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
-	amd64_membase_emit ((inst), 0, (basereg), (disp)); \
-} while (0)
-
 #define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
 #define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
@@ -508,6 +813,98 @@ typedef union {
 	} \
 	} while (0)
 
+#elif defined(__native_client_codegen__)
+
+/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */
+/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
+/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
+/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
+/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
+
+#define amd64_padding_size(inst,size) \
+	do { \
+		unsigned char *code_start = (inst); \
+		switch ((size)) { \
+			/* xchg %eax,%eax, recognized by hardware as a NOP */ \
+			case 1: *(inst)++ = 0x90; break; \
+			/* xchg %ax,%ax */ \
+			case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
+				break; \
+			/* nop (%rax) */ \
+			case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				*(inst)++ = 0x00; \
+				break; \
+			/* nop 0x0(%rax) */ \
+			case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nop 0x0(%rax,%rax) */ \
+			case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nopw 0x0(%rax,%rax) */ \
+			case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
+				*(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nop 0x0(%rax) (32-bit displacement) */ \
+			case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 2, 0, AMD64_RAX); \
+				x86_imm_emit32((inst), 0); \
+				break; \
+			/* nop 0x0(%rax,%rax) (32-bit displacement) */ \
+			case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 2, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit32 ((inst), 0); \
+				break; \
+			default: \
+				g_assert_not_reached(); \
+		} \
+		g_assert(code_start + (size) == (unsigned char *)(inst)); \
+	} while (0)
+
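For instance, a caller that wants the next instruction to start on a 32-byte Native Client bundle boundary could pad using the table above, in chunks of at most 8 bytes, the largest single NOP it provides. A usage sketch only, assuming glib's gsize and this header:

	unsigned char buf [64], *p = buf;
	int pad = (32 - ((gsize)p & 31)) & 31; /* bytes to the next 32-byte boundary */
	while (pad > 8) { amd64_padding_size (p, 8); pad -= 8; }
	if (pad) amd64_padding_size (p, pad);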
+/* Size is ignored for Native Client calls; we restrict jumping to 32 bits */
+#define amd64_call_membase_size(inst,basereg,disp,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre(inst); \
+		amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+		amd64_call_reg_internal((inst), AMD64_R11); \
+		amd64_call_sequence_post(inst); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define amd64_jump_membase_size(inst,basereg,disp,size) \
+	do { \
+		amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+		amd64_jump_reg_size((inst), AMD64_R11, 4); \
+	} while (0)
+
+/* On Native Client we can't jump more than INT_MAX in either direction */
+#define amd64_jump_code_size(inst,target,size) \
+	do { \
+		/* x86_jump_code used twice in case of */ \
+		/* relocation by amd64_codegen_post */ \
+		guint8* jump_start; \
+		amd64_codegen_pre(inst); \
+		assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
+		x86_jump_code((inst),(target)); \
+		inst = amd64_codegen_post(inst); \
+		jump_start = (inst); \
+		x86_jump_code((inst),(target)); \
+		mono_amd64_patch(jump_start, (target)); \
+} while (0)
+
+#endif /*__native_client_codegen__*/
+
 /*
  * SSE
  */
@@ -517,31 +914,39 @@ typedef union {
 
 /* Two opcode SSE defines */
 
 #define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \
+	amd64_codegen_pre(inst); \
 	amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
 	*(inst)++ = (unsigned char)(op1); \
 	*(inst)++ = (unsigned char)(op2); \
 	x86_reg_emit ((inst), (dreg), (reg)); \
+	amd64_codegen_post(inst); \
 } while (0)
 
 #define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0)
 
 #define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \
+	amd64_codegen_pre(inst); \
 	emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
 	x86_imm_emit8 ((inst), (imm)); \
+	amd64_codegen_post(inst); \
} while (0)
 
 #define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \
+	amd64_codegen_pre(inst); \
 	amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
 	*(inst)++ = (unsigned char)(op1); \
 	*(inst)++ = (unsigned char)(op2); \
 	amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	amd64_codegen_post(inst); \
 } while (0)
 
 #define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \
+	amd64_codegen_pre(inst); \
 	amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ?
0 : (basereg)); \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ } while (0) /* Three opcode SSE defines */ @@ -553,45 +958,55 @@ typedef union { } while (0) #define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ + amd64_codegen_pre(inst); \ *(inst)++ = (unsigned char)(op1); \ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) #define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ + amd64_codegen_pre(inst); \ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ x86_imm_emit8 ((inst), (imm)); \ + amd64_codegen_post(inst); \ } while (0) #define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ - *(inst)++ = (unsigned char)(op1); \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ - *(inst)++ = (unsigned char)(op1); \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ } while (0) /* Four opcode SSE defines */ #define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ - *(inst)++ = (unsigned char)(op1); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ *(inst)++ = (unsigned char)(op2); \ *(inst)++ = (unsigned char)(op3); \ *(inst)++ = (unsigned char)(op4); \ x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ } while (0) #define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) @@ -954,189 +1369,244 @@ typedef union { /* Generated from x86-codegen.h */ #define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) -#define amd64_cld_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); } while (0) -#define amd64_stosb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); } while (0) -#define amd64_stosl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); } while (0) -#define amd64_stosd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); } while (0) -#define amd64_movsb_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); } while (0) -#define amd64_movsl_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); } while (0) -#define amd64_movsd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); } while (0) -#define amd64_prefix_size(inst,p,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_prefix((inst), p); } while (0) -#define amd64_rdtsc_size(inst,size) do { amd64_emit_rex 
((inst),(size),0,0,0); x86_rdtsc(inst); } while (0) -#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); } while (0) -#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) -#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_inc_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); } while (0) -#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); } while (0) -//#define amd64_inc_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); } while (0) -#define amd64_dec_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); } while (0) -#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); } while (0) -//#define amd64_dec_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); } while (0) -#define amd64_not_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); } while (0) -#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_not_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); } while (0) -#define amd64_neg_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); } while (0) -#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_neg_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); } while (0) -#define amd64_nop_size(inst,size) do { x86_nop(inst); } while (0) -//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0) -#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); } while (0) -#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) -#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { 
amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) -#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); } while (0) -#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) -//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); } while (0) -#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); } while (0) -//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) -#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); } while (0) -#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); } while (0) -#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); } while (0) -#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); } while (0) -#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); } while (0) -#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); } while (0) -#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem_imm((inst),(opc),(mem),(imm)); } while (0) -#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); } while (0) -#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); } while (0) -#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem((inst),(opc),(mem)); } while (0) -#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); } while (0) -#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); 
x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); } while (0) -#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); } while (0) -#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); } while (0) -#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); } while (0) -#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); } while (0) -#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); } while (0) -#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) -#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); } while (0) -#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); } while (0) -#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); } while (0) -#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); } while (0) -#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); } while (0) -#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); } while (0) -#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 
4 : (size)); } while (0) -//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); } while (0) -//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_clear_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); } while (0) -//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); } while (0) -#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); } while (0) -//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); } while (0) -#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); } while (0) -//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) -#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); } while (0) -#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); } while (0) -#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); } while (0) -#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); } while (0) -#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); } while (0) -#define amd64_cdq_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); } while (0) -#define amd64_wait_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); } while (0) -#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); } while (0) -#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) 
do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); } while (0) -#define amd64_fp_op_size(inst,opc,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); } while (0) -#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); } while (0) -#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); } while (0) -#define amd64_fstp_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); } while (0) -#define amd64_fcompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); } while (0) -#define amd64_fucompp_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); } while (0) -#define amd64_fnstsw_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); } while (0) -#define amd64_fnstcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); } while (0) -#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_fldcw_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); } while (0) -#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_fchs_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); } while (0) -#define amd64_frem_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); } while (0) -#define amd64_fxch_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); } while (0) -#define amd64_fcomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); } while (0) -#define amd64_fcomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); } while (0) -#define amd64_fucomi_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); } while (0) -#define amd64_fucomip_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); } while (0) -#define amd64_fld_size(inst,mem,is_double,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); } while (0) -//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); } while (0) -#define amd64_fld80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); } while (0) -#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_fild_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); } while (0) -#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) -#define amd64_fld_reg_size(inst,index,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); } while (0) -#define amd64_fldz_size(inst,size) do { amd64_emit_rex 
((inst),0,0,0,0); x86_fldz(inst); } while (0) -#define amd64_fld1_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); } while (0) -#define amd64_fldpi_size(inst,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); } while (0) -#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); } while (0) -#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); } while (0) -#define amd64_fst80_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); } while (0) -#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); } while (0) -#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); } while (0) -#define amd64_fstsw_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); } while (0) -#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); } while (0) -//#define amd64_push_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); } while (0) -#define amd64_push_regp_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); } while (0) -#define amd64_push_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); } while (0) -//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); } while (0) -#define amd64_push_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); } while (0) -//#define amd64_pop_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); } while (0) -#define amd64_pop_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); } while (0) -#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); } while (0) -#define amd64_pushad_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); } while (0) -#define amd64_pushfd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); } while (0) -#define amd64_popad_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); } while (0) -#define amd64_popfd_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); } while (0) -#define amd64_loop_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); } while (0) -#define amd64_loope_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); } while (0) 
-#define amd64_loopne_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); } while (0) -#define amd64_jump32_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); } while (0) -#define amd64_jump8_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); } while (0) +#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) +#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) +//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); 
x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while
(0) +#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) +//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0) +#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); 
x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) +#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0) +#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0) +//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define 
amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) +//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define 
amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) +#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) +//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0) +#if !defined( __native_client_codegen__ ) +/* Defined above for Native Client, so they can be used in other macros */ #define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) -#define amd64_jump_disp_size(inst,disp,size) do { amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); } while (0) +#endif +#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) -#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); 
x86_branch((inst),(cond),(target),(is_signed)); } while (0) -#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); } while (0) -#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); } while (0) -#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { x86_set_mem((inst),(cond),(mem),(is_signed)); } while (0) -#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); } while (0) +#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) +#if defined(__default_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) +#elif defined(__native_client_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) \ + do { \ + /* amd64_branch_size_body used twice in */ \ + /* case of relocation by amd64_codegen_post */ \ + guint8* branch_start; \ + amd64_codegen_pre(inst); \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + inst = amd64_codegen_post(inst); \ + branch_start = inst; \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + mono_amd64_patch(branch_start, (target)); \ + } while (0) +#endif + +#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) + +#if defined(__default_codegen__) + #define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) -//#define amd64_call_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); } while (0) -#define amd64_call_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); } while (0) #define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) -//#define amd64_ret_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); } while (0) -#define amd64_ret_imm_size(inst,imm,size) do { amd64_emit_rex ((inst),(size),0,0,0); 
x86_ret_imm((inst),(imm)); } while (0) -#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); } while (0) -#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); } while (0) -#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); } while (0) -#define amd64_enter_size(inst,framesize) do { amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); } while (0) -//#define amd64_leave_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); } while (0) -#define amd64_sahf_size(inst,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); } while (0) -#define amd64_fsin_size(inst,size) do { x86_fsin(inst); } while (0) -#define amd64_fcos_size(inst,size) do { x86_fcos(inst); } while (0) -#define amd64_fabs_size(inst,size) do { x86_fabs(inst); } while (0) -#define amd64_ftst_size(inst,size) do { x86_ftst(inst); } while (0) -#define amd64_fxam_size(inst,size) do { x86_fxam(inst); } while (0) -#define amd64_fpatan_size(inst,size) do { x86_fpatan(inst); } while (0) -#define amd64_fprem_size(inst,size) do { x86_fprem(inst); } while (0) -#define amd64_fprem1_size(inst,size) do { x86_fprem1(inst); } while (0) -#define amd64_frndint_size(inst,size) do { x86_frndint(inst); } while (0) -#define amd64_fsqrt_size(inst,size) do { x86_fsqrt(inst); } while (0) -#define amd64_fptan_size(inst,size) do { x86_fptan(inst); } while (0) -//#define amd64_padding_size(inst,size) do { x86_padding((inst),(size)); } while (0) -#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); } while (0) -#define amd64_epilog_size(inst,reg_mask,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); } while (0) -#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); } while (0) -#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); } while (0) -#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); } while (0) + +#elif defined(__native_client_codegen__) +/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ +#define amd64_call_imm_size(inst,disp,size) \ + do { \ + amd64_codegen_pre((inst)); \ + amd64_call_sequence_pre((inst)); \ + x86_call_imm((inst),(disp)); \ + amd64_call_sequence_post((inst)); \ + amd64_codegen_post((inst)); \ + } while (0) + +/* x86_call_code is called twice below, first so we can get the size of the */ +/* call sequence, and again so the exact offset from "inst" is used, since */ +/* the sequence could have moved from amd64_call_sequence_post. 
*/ +/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ +#define amd64_call_code_size(inst,target,size) \ + do { \ + amd64_codegen_pre((inst)); \ + guint8* adjusted_start; \ + guint8* call_start; \ + amd64_call_sequence_pre((inst)); \ + x86_call_code((inst),(target)); \ + adjusted_start = amd64_call_sequence_post((inst)); \ + call_start = adjusted_start; \ + x86_call_code(adjusted_start, (target)); \ + amd64_codegen_post((inst)); \ + mono_amd64_patch(call_start, (target)); \ + } while (0) + +#endif /*__native_client_codegen__*/ + +//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0) +#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0) +//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0) +#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0) +#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0) +#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0) +//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); 
x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0) +#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0) +#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0) diff --git a/amd64/tramp.c b/amd64/tramp.c index 5a4f9a9..6dbec93 100644 --- a/amd64/tramp.c +++ b/amd64/tramp.c @@ -543,7 +543,7 @@ enum_marshal2: amd64_call_reg (p, AMD64_R11); if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { - amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, 8); + amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, SIZEOF_VOID_P); } /* * Handle retval. @@ -883,19 +883,19 @@ enum_calc_size: * Initialize MonoInvocation fields, first the ones known now. */ amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, 8); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, SIZEOF_VOID_P); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, SIZEOF_VOID_P); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, SIZEOF_VOID_P); /* * Set the method pointer. */ - amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, 8); + amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, SIZEOF_VOID_P); /* * Handle this. */ if (sig->hasthis) - amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, 8); + amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, SIZEOF_VOID_P); /* * Handle the arguments. stackval_pos is the offset from RBP of the stackval in the MonoInvocation args array . @@ -903,7 +903,7 @@ enum_calc_size: * We just call stackval_from_data to handle all the (nasty) issues.... 
*/ amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, SIZEOF_VOID_P); for (i = 0; i < sig->param_count; ++i) { /* Need to call stackval_from_data (MonoType *type, stackval *result, char *data, gboolean pinvoke); */ amd64_mov_reg_imm (p, AMD64_R11, stackval_from_data); @@ -926,12 +926,12 @@ enum_calc_size: * Handle the return value storage area. */ amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, 8); + amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, SIZEOF_VOID_P); if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { MonoClass *klass = sig->ret->data.klass; if (!klass->enumtype) { - amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, 8); - amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, 8); + amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, SIZEOF_VOID_P); + amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, SIZEOF_VOID_P); } } @@ -947,7 +947,7 @@ enum_calc_size: */ amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos); if (sig->ret->byref) { - amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8); + amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, SIZEOF_VOID_P); } else { int simpletype = sig->ret->type; enum_retvalue: -- cgit v1.1 From 4edb45273377cc0858dab7e12b19026467e796c5 Mon Sep 17 00:00:00 2001 From: Elijah Taylor Date: Tue, 14 Dec 2010 16:03:45 -0800 Subject: Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client --- x86/x86-codegen.h | 214 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 180 insertions(+), 34 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index af3e3c6..6ca3695 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -17,9 +17,7 @@ #include #ifdef __native_client_codegen__ -#define kNaClAlignment 32 -#define kNaClAlignmentMask (kNaClAlignment - 1) -extern guint8 nacl_align_byte; +extern gint8 nacl_align_byte; #endif /* __native_client_codegen__ */ @@ -28,15 +26,10 @@ extern guint8 nacl_align_byte; #define x86_call_sequence_pre(inst) guint8* _code_start = (inst); #define x86_call_sequence_post(inst) \ (mono_nacl_align_call(&_code_start, &(inst)), _code_start); -#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); -#define x86_call_sequence_post_val(inst) \ - (mono_nacl_align_call(&_code_start, &(inst)), _code_start); #else #define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0) -#define x86_call_sequence_pre(inst) -#define x86_call_sequence_post(inst) -#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); -#define x86_call_sequence_post_val(inst) _code_start +#define x86_call_sequence_pre(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post(inst) _code_start #endif /* __native_client_codegen__ */ @@ -305,7 +298,7 @@ typedef union { #define kMaxMembaseEmitPadding 6 -#define x86_membase_emit(inst,r,basereg,disp) do {\ +#define x86_membase_emit_body(inst,r,basereg,disp) do {\ if ((basereg) == X86_ESP) { \ if ((disp) == 0) { \ x86_address_byte ((inst), 0, (r), X86_ESP); \ @@ -334,6 +327,18 @@ typedef union { } \ } while (0) 
+#if defined(__native_client_codegen__) && defined(TARGET_AMD64) +#define x86_membase_emit(inst,r,basereg,disp) \ + do { \ + amd64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \ + } while (0) +#else /* __default_codegen__ || 32-bit NaCl codegen */ +#define x86_membase_emit(inst,r,basereg,disp) \ + do { \ + x86_membase_emit_body((inst),(r),(basereg),(disp)); \ + } while (0) +#endif + #define kMaxMemindexEmitPadding 6 #define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \ @@ -351,7 +356,7 @@ typedef union { x86_imm_emit8 ((inst), (disp)); \ } else { \ x86_address_byte ((inst), 2, (r), 4); \ - x86_address_byte ((inst), (shift), (indexreg), 5); \ + x86_address_byte ((inst), (shift), (indexreg), (basereg)); \ x86_imm_emit32 ((inst), (disp)); \ } \ } while (0) @@ -438,12 +443,23 @@ typedef union { } while ( in_nop ); \ } while (0) +#if defined(__native_client__) #define x86_patch(ins,target) \ do { \ unsigned char* inst = (ins); \ + guint8* new_target = nacl_modify_patch_target((target)); \ x86_skip_nops((inst)); \ - x86_do_patch((inst), (target)); \ + x86_do_patch((inst), new_target); \ } while (0) +#else /* __native_client__ */ +#define x86_patch(ins,target) \ + do { \ + unsigned char* inst = (ins); \ + guint8* new_target = (target); \ + x86_skip_nops((inst)); \ + x86_do_patch((inst), new_target); \ + } while (0) +#endif /* __native_client__ */ #else #define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0) @@ -472,6 +488,13 @@ typedef union { #define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) #define x86_movsd(inst) x86_movsl((inst)) +#if defined(__default_codegen__) +#define x86_prefix(inst,p) \ + do { \ + *(inst)++ =(unsigned char) (p); \ + } while (0) +#elif defined(__native_client_codegen__) +#if defined(TARGET_X86) /* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. 
*/ /* This keeps us from having to call x86_codegen_pre with specific */ /* knowledge of the size of the instruction that follows it, and */ @@ -481,6 +504,18 @@ typedef union { x86_codegen_pre(&(inst), kNaClAlignment - 1); \ *(inst)++ =(unsigned char) (p); \ } while (0) +#elif defined(TARGET_AMD64) +/* We need to tag any prefixes so we can perform proper membase sandboxing */ +/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */ +#define x86_prefix(inst,p) \ + do { \ + amd64_nacl_tag_legacy_prefix((inst)); \ + *(inst)++ =(unsigned char) (p); \ + } while (0) + +#endif /* TARGET_AMD64 */ + +#endif /* __native_client_codegen__ */ #define x86_rdtsc(inst) \ do { \ @@ -1041,7 +1076,7 @@ typedef union { x86_codegen_pre(&(inst), 7); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ @@ -1053,7 +1088,7 @@ typedef union { x86_codegen_pre(&(inst), 3); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ @@ -1065,7 +1100,7 @@ typedef union { x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ @@ -1077,7 +1112,7 @@ typedef union { x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ @@ -1089,7 +1124,7 @@ typedef union { x86_codegen_pre(&(inst), 3); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ @@ -1101,7 +1136,7 @@ typedef union { x86_codegen_pre(&(inst), 7); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ @@ -1115,7 +1150,7 @@ typedef union { x86_codegen_pre(&(inst), kMovRegMembasePadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ @@ -1127,7 +1162,7 @@ typedef union { x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: *(inst)++ = (unsigned char)0x66; /* fall through */ \ + case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \ case 4: *(inst)++ = (unsigned 
char)0x8b; break; \ default: assert (0); \ } \ @@ -1155,7 +1190,7 @@ typedef union { x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ x86_codegen_pre(&(inst), 9); \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ *(inst)++ = (unsigned char)0xc7; \ x86_mem_emit ((inst), 0, (mem)); \ x86_imm_emit16 ((inst), (imm)); \ @@ -1176,7 +1211,7 @@ typedef union { x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ *(inst)++ = (unsigned char)0xc7; \ x86_membase_emit ((inst), 0, (basereg), (disp)); \ x86_imm_emit16 ((inst), (imm)); \ @@ -1197,7 +1232,7 @@ typedef union { x86_imm_emit8 ((inst), (imm)); \ } else if ((size) == 2) { \ x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \ - *(inst)++ = (unsigned char)0x66; \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ *(inst)++ = (unsigned char)0xc7; \ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \ x86_imm_emit16 ((inst), (imm)); \ @@ -1681,6 +1716,7 @@ typedef union { x86_imm_emit8 ((inst), (imm)); \ } while (0) +#if defined(TARGET_X86) #define x86_jump32(inst,imm) \ do { \ x86_codegen_pre(&(inst), 5); \ @@ -1694,9 +1730,27 @@ typedef union { *(inst)++ = (unsigned char)0xeb; \ x86_imm_emit8 ((inst), (imm)); \ } while (0) +#elif defined(TARGET_AMD64) +/* These macros are used directly from mini-amd64.c and other */ +/* amd64 specific files, so they need to be instrumented directly. */ +#define x86_jump32(inst,imm) \ + do { \ + amd64_codegen_pre(inst); \ + *(inst)++ = (unsigned char)0xe9; \ + x86_imm_emit32 ((inst), (imm)); \ + amd64_codegen_post(inst); \ + } while (0) +#define x86_jump8(inst,imm) \ + do { \ + amd64_codegen_pre(inst); \ + *(inst)++ = (unsigned char)0xeb; \ + x86_imm_emit8 ((inst), (imm)); \ + amd64_codegen_post(inst); \ + } while (0) +#endif -#ifdef __native_client_codegen__ +#if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) #define x86_jump_reg(inst,reg) do { \ x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)0x83; /* and */ \ @@ -1747,7 +1801,7 @@ typedef union { /* * target is a pointer in our buffer. 
*/ -#define x86_jump_code(inst,target) \ +#define x86_jump_code_body(inst,target) \ do { \ int t; \ x86_codegen_pre(&(inst), 2); \ @@ -1761,6 +1815,31 @@ typedef union { } \ } while (0) +#if defined(__default_codegen__) +#define x86_jump_code(inst,target) \ + do { \ + x86_jump_code_body((inst),(target)); \ + } while (0) +#elif defined(__native_client_codegen__) && defined(TARGET_X86) +#define x86_jump_code(inst,target) \ + do { \ + guint8* jump_start = (inst); \ + x86_jump_code_body((inst),(target)); \ + x86_patch(jump_start, (target)); \ + } while (0) +#elif defined(__native_client_codegen__) && defined(TARGET_AMD64) +#define x86_jump_code(inst,target) \ + do { \ + /* jump_code_body is used twice because there are offsets */ \ + /* calculated based on the IP, which can change after the */ \ + /* call to amd64_codegen_post */ \ + amd64_codegen_pre(inst); \ + x86_jump_code_body((inst),(target)); \ + inst = amd64_codegen_post(inst); \ + x86_jump_code_body((inst),(target)); \ + } while (0) +#endif /* __native_client_codegen__ */ + #define x86_jump_disp(inst,disp) \ do { \ int t = (disp) - 2; \ @@ -1772,6 +1851,7 @@ typedef union { } \ } while (0) +#if defined(TARGET_X86) #define x86_branch8(inst,cond,imm,is_signed) \ do { \ x86_codegen_pre(&(inst), 2); \ @@ -1792,12 +1872,40 @@ typedef union { *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ x86_imm_emit32 ((inst), (imm)); \ } while (0) +#elif defined(TARGET_AMD64) +/* These macros are used directly from mini-amd64.c and other */ +/* amd64 specific files, so they need to be instrumented directly. */ +#define x86_branch8(inst,cond,imm,is_signed) \ + do { \ + amd64_codegen_pre(inst); \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)]; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)]; \ + x86_imm_emit8 ((inst), (imm)); \ + amd64_codegen_post(inst); \ + } while (0) +#define x86_branch32(inst,cond,imm,is_signed) \ + do { \ + amd64_codegen_pre(inst); \ + *(inst)++ = (unsigned char)0x0f; \ + if ((is_signed)) \ + *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ + else \ + *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ + x86_imm_emit32 ((inst), (imm)); \ + amd64_codegen_post(inst); \ + } while (0) +#endif +#if defined(TARGET_X86) #define x86_branch(inst,cond,target,is_signed) \ do { \ int offset; \ + guint8* branch_start; \ x86_codegen_pre(&(inst), 2); \ offset = (target) - (inst) - 2; \ + branch_start = (inst); \ if (x86_is_imm8 ((offset))) \ x86_branch8 ((inst), (cond), offset, (is_signed)); \ else { \ @@ -1805,7 +1913,42 @@ typedef union { offset = (target) - (inst) - 6; \ x86_branch32 ((inst), (cond), offset, (is_signed)); \ } \ + x86_patch(branch_start, (target)); \ } while (0) +#elif defined(TARGET_AMD64) +/* This macro is used directly from mini-amd64.c and other */ +/* amd64 specific files, so it needs to be instrumented directly. 
*/ + +#define x86_branch_body(inst,cond,target,is_signed) \ + do { \ + int offset = (target) - (inst) - 2; \ + if (x86_is_imm8 ((offset))) \ + x86_branch8 ((inst), (cond), offset, (is_signed)); \ + else { \ + offset = (target) - (inst) - 6; \ + x86_branch32 ((inst), (cond), offset, (is_signed)); \ + } \ + } while (0) + +#if defined(__default_codegen__) +#define x86_branch(inst,cond,target,is_signed) \ + do { \ + x86_branch_body((inst),(cond),(target),(is_signed)); \ + } while (0) +#elif defined(__native_client_codegen__) +#define x86_branch(inst,cond,target,is_signed) \ + do { \ + /* branch_body is used twice because there are offsets */ \ + /* calculated based on the IP, which can change after */ \ + /* the call to amd64_codegen_post */ \ + amd64_codegen_pre(inst); \ + x86_branch_body((inst),(cond),(target),(is_signed)); \ + inst = amd64_codegen_post(inst); \ + x86_branch_body((inst),(cond),(target),(is_signed)); \ + } while (0) +#endif /* __native_client_codegen__ */ + +#endif /* TARGET_AMD64 */ #define x86_branch_disp(inst,cond,disp,is_signed) \ do { \ @@ -1865,10 +2008,10 @@ typedef union { x86_call_sequence_post((inst)); \ } while (0) -#ifdef __native_client_codegen__ + +#if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) #define x86_call_reg_internal(inst,reg) \ do { \ - x86_codegen_pre(&(inst), 5); \ *(inst)++ = (unsigned char)0x83; /* and */ \ x86_reg_emit ((inst), 4, (reg)); /* reg */ \ *(inst)++ = (unsigned char)nacl_align_byte; \ @@ -1914,20 +2057,23 @@ typedef union { #endif /* __native_client_codegen__ */ -#ifdef __native_client_codegen__ +#if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) #define x86_call_code(inst,target) \ do { \ int _x86_offset; \ + guint8* call_start; \ guint8* _aligned_start; \ - x86_call_sequence_pre_val ((inst)); \ + x86_call_sequence_pre((inst)); \ _x86_offset = (unsigned char*)(target) - (inst); \ _x86_offset -= 5; \ x86_call_imm_body ((inst), _x86_offset); \ - _aligned_start = x86_call_sequence_post_val ((inst)); \ + _aligned_start = x86_call_sequence_post((inst)); \ + call_start = _aligned_start; \ _x86_offset = (unsigned char*)(target) - (_aligned_start); \ _x86_offset -= 5; \ x86_call_imm_body ((_aligned_start), _x86_offset); \ + x86_patch(call_start, (target)); \ } while (0) #define SIZE_OF_RET 6 @@ -2062,9 +2208,9 @@ typedef union { #ifdef __native_client_codegen__ -#define kNaClLengthOfCallReg 5 -#define kNaClLengthOfCallImm 5 -#define kNaClLengthOfCallMembase (kNaClLengthOfCallReg + 6) +#define kx86NaClLengthOfCallReg 5 +#define kx86NaClLengthOfCallImm 5 +#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6) #endif /* __native_client_codegen__ */ -- cgit v1.1 From a7074ea55af096913e4bcc8e044be7601bcc55b5 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 6 Jan 2011 11:49:32 +0100 Subject: Fix warnings introduced by the NACL merge. 
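Only x86_call_code actually consumes the aligned start address, so on the default (non-NaCl) build the plain pre/post macros can expand to nothing while the _val variants keep declaring and returning _code_start. A minimal sketch of the pattern, using hypothetical demo_* names standing in for the tree's own macros:

/* Sketch only: on the default build the plain forms are empty, so
 * call sites that ignore the aligned start no longer declare a dead
 * variable and no longer trigger unused-variable warnings. */
#define demo_seq_pre_val(p)  unsigned char *_code_start = (p);
#define demo_seq_post_val(p) _code_start
#define demo_seq_pre(p)
#define demo_seq_post(p)

static void emit_ret (unsigned char *code)
{
	demo_seq_pre (code);   /* expands to nothing here */
	*code++ = 0xc3;        /* ret */
	demo_seq_post (code);
}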
--- x86/x86-codegen.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 6ca3695..3805db0 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -23,13 +23,18 @@ extern gint8 nacl_align_byte; #if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) #define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0) -#define x86_call_sequence_pre(inst) guint8* _code_start = (inst); -#define x86_call_sequence_post(inst) \ +#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post_val(inst) \ (mono_nacl_align_call(&_code_start, &(inst)), _code_start); +#define x86_call_sequence_pre(inst) x86_call_sequence_pre_val((inst)) +#define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst)) #else #define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0) -#define x86_call_sequence_pre(inst) guint8* _code_start = (inst); -#define x86_call_sequence_post(inst) _code_start +/* Two variants are needed to avoid warnings */ +#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); +#define x86_call_sequence_post_val(inst) _code_start +#define x86_call_sequence_pre(inst) +#define x86_call_sequence_post(inst) #endif /* __native_client_codegen__ */ @@ -2064,11 +2069,11 @@ typedef union { int _x86_offset; \ guint8* call_start; \ guint8* _aligned_start; \ - x86_call_sequence_pre((inst)); \ + x86_call_sequence_pre_val((inst)); \ _x86_offset = (unsigned char*)(target) - (inst); \ _x86_offset -= 5; \ x86_call_imm_body ((inst), _x86_offset); \ - _aligned_start = x86_call_sequence_post((inst)); \ + _aligned_start = x86_call_sequence_post_val((inst)); \ call_start = _aligned_start; \ _x86_offset = (unsigned char*)(target) - (_aligned_start); \ _x86_offset -= 5; \ -- cgit v1.1 From 48f5efeb334eb4b6e867c65ae53e21b3c45fd771 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 6 Jan 2011 19:35:45 +0100 Subject: Put back a macro definition accidentally removed by the NaCl changes.
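The restored macro follows the header's usual _size pattern: emit a REX prefix derived from the operand width and register numbers, then the one-byte ALU opcode computed as (opc << 3) + 3 (the reg, r/m direction of the ALU group), then the ModRM/displacement bytes, with the register fields masked to their low three bits because the fourth bit travels in REX. A hedged usage sketch; the include path is an assumption, and the expected bytes follow from X86_ADD being 0 in x86-codegen.h:

#include "amd64-codegen.h"

static void emit_add_example (void)
{
	unsigned char buf [16], *code = buf;
	/* add rax, [rbp + 16] -> 0x48 0x03 0x45 0x10:
	 * REX.W, ADD r64 r/m64, ModRM (mod=01 reg=rax rm=rbp), disp8 */
	amd64_alu_reg_membase_size (code, X86_ADD, AMD64_RAX, AMD64_RBP, 16, 8);
}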
--- amd64/amd64-codegen.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 8684a5c..acd4052 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -282,6 +282,15 @@ typedef union { #define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) +#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + amd64_membase_emit (inst, reg, basereg, disp); \ + amd64_codegen_post(inst); \ +} while (0) + #define amd64_mov_regp_reg(inst,regp,reg,size) \ do { \ amd64_codegen_pre(inst); \ @@ -1406,7 +1415,7 @@ typedef union { //#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0) #define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) #define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0) #define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -- cgit v1.1 From c1fb94e7e72e58924dcebe8cdfcdbcbe1e65b644 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 6 Jan 2011 18:43:59 +0100 Subject: Add SHUFPS and macro to emit it. 
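The new emitter writes the 0x0F escape, the opcode, a register-form ModRM byte and a trailing immediate, which is exactly the SHUFPS xmm, xmm/m128, imm8 shape (the opcode constant is renamed X86_SSE_SHUFP in the next commit). A hedged usage sketch, assuming the header's X86_XMM0 register constant:

#include "x86-codegen.h"

static void emit_shufps_example (void)
{
	unsigned char buf [16], *code = buf;
	/* shufps xmm0, xmm0, 0 -> 0x0F 0xC6 0xC0 0x00; selector 0 in all
	 * four two-bit fields of the imm8 broadcasts lane 0 to every lane */
	x86_sse_alu_reg_reg_imm8 (code, X86_SSE_SHUFPS, X86_XMM0, X86_XMM0, 0x00);
}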
--- x86/x86-codegen.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 3805db0..0a5fca1 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -2380,6 +2380,9 @@ typedef enum { X86_SSE_PEXTRB = 0x14,/*sse41*/ X86_SSE_PEXTRW = 0xC5, X86_SSE_PEXTRD = 0x16,/*sse41*/ + + X86_SSE_SHUFPS = 0xC6, + } X86_SSE_Opcode; @@ -2426,6 +2429,14 @@ typedef enum { x86_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) +#define x86_sse_alu_reg_reg_imm8(inst,opc,dreg,reg, imm8) \ + do { \ + x86_codegen_pre(&(inst), 4); \ + *(inst)++ = (unsigned char)0x0F; \ + *(inst)++ = (unsigned char)(opc); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + *(inst)++ = (unsigned char)(imm8); \ + } while (0) #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ x86_codegen_pre(&(inst), 4); \ -- cgit v1.1 From 1aa6254fb828e043ea55d7d3e37b02812e2d9bdf Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Thu, 6 Jan 2011 21:36:31 +0100 Subject: Implement Shuffle for 64-bit types. * x86-codegen.h: Add a macro and define to emit shufpd. * mini-ops.h: Add OP_SHUPD. * cpu-x86.md: * mini-x86.h: Implement x86 support. * simd-intrinsics.c: Handle shuffle on 64-bit types. * VectorOperations.cs: Add new methods. --- x86/x86-codegen.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 0a5fca1..0c67b45 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -2381,7 +2381,7 @@ typedef enum { X86_SSE_PEXTRW = 0xC5, X86_SSE_PEXTRD = 0x16,/*sse41*/ - X86_SSE_SHUFPS = 0xC6, + X86_SSE_SHUFP = 0xC6, } X86_SSE_Opcode; @@ -2438,6 +2438,13 @@ typedef enum { *(inst)++ = (unsigned char)(imm8); \ } while (0) +#define x86_sse_alu_pd_reg_reg_imm8(inst,opc,dreg,reg, imm8) \ + do { \ + x86_codegen_pre(&(inst), 5); \ + *(inst)++ = (unsigned char)0x66; \ + x86_sse_alu_reg_reg_imm8 ((inst), (opc), (dreg), (reg), (imm8)); \ + } while (0) + #define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \ do { \ x86_codegen_pre(&(inst), 4); \ -- cgit v1.1 From f0e5c2be6946491ba052c82794361ec0d33cb04c Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Fri, 7 Jan 2011 00:19:03 +0000 Subject: AMD64 version of the new mono.simd ops --- amd64/amd64-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index acd4052..cb7c80e 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -1122,6 +1122,10 @@ typedef union { #define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) +#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) + +#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) + #define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) -- cgit v1.1 From b7639e01d7603a1e34dd225edb5e99fd2181494b Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 10 Jan 2011 10:40:12 +0100 Subject: Implement a few conversion operations. Add conversion operations between 4f, 2d and 4i. Implemented only on x86 for now.
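SSE2 reuses the same two opcode bytes for several of these conversions and tells them apart only by the mandatory prefix, which is why the enum added below carries duplicate values; the prefix is supplied by the ps/pd emitter variants. A hedged sketch of the pairing (it matches the amd64 macros in the next commit) plus one usage, assuming the header's X86_XMM0/X86_XMM1 constants:

#include "x86-codegen.h"

static void emit_cvt_example (void)
{
	unsigned char buf [16], *code = buf;
	/* 0F 5A: none = cvtps2pd, 66 = cvtpd2ps
	 * 0F 5B: none = cvtdq2ps, 66 = cvtps2dq, F3 = cvttps2dq
	 * 0F E6: F3 = cvtdq2pd,   F2 = cvtpd2dq, 66 = cvttpd2dq */
	/* cvtps2dq xmm0, xmm1 -> 0x66 0x0F 0x5B 0xC1 */
	x86_sse_alu_pd_reg_reg (code, X86_SSE_CVTPS2DQ, X86_XMM0, X86_XMM1);
}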
--- x86/x86-codegen.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 0c67b45..ff79b52 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -2382,7 +2382,15 @@ typedef enum { X86_SSE_PEXTRD = 0x16,/*sse41*/ X86_SSE_SHUFP = 0xC6, - + + X86_SSE_CVTDQ2PD = 0xE6, + X86_SSE_CVTDQ2PS = 0x5B, + X86_SSE_CVTPD2DQ = 0xE6, + X86_SSE_CVTPD2PS = 0x5A, + X86_SSE_CVTPS2DQ = 0x5B, + X86_SSE_CVTPS2PD = 0x5A, + X86_SSE_CVTTPD2DQ = 0xE6, + X86_SSE_CVTTPS2DQ = 0x5B, } X86_SSE_Opcode; -- cgit v1.1 From 92a55ae009739b5ec652676b8fdd615375c27fc0 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 10 Jan 2011 10:52:46 +0000 Subject: Implement mono.simd new conversion ops on amd64 --- amd64/amd64-codegen.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index cb7c80e..857c7ab 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -1353,6 +1353,23 @@ typedef union { #define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) +#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6) + +#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B) + +#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6) + +#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A) + +#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B) + +#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A) + +#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6) + +#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B) + + #define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) #define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) -- cgit v1.1 From f81e3005a53a10c39f4ca8dd30a2a88719c7d005 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Sun, 16 Jan 2011 23:40:23 -0500 Subject: Cast result of s390x_emit16/32 to eliminate lots of warning messages. Check for wrapper-managed-to-native when assessing call parameters and have emit_prolog use native_size when processing those parameters. Signed-off-by: Neale Ferguson --- s390x/s390x-codegen.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 7a2e069..26411b4 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -433,13 +433,13 @@ typedef struct { #define s390_emit16(c, x) do \ { \ - *((guint16 *) c) = x; \ + *((guint16 *) c) = (guint16) x; \ c += sizeof(guint16); \ } while(0) #define s390_emit32(c, x) do \ { \ - *((guint32 *) c) = x; \ + *((guint32 *) c) = (guint32) x; \ c += sizeof(guint32); \ } while(0) -- cgit v1.1 From b1a613aca13e03185d0ba49e46fd77fd8eb98fc9 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sun, 20 Feb 2011 03:22:52 +0100 Subject: Implement mono_memory_barrier () and OP_MEMORY_BARRIER for ARM.
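Before ARMv7 there is no dedicated barrier instruction; a data memory barrier is issued through coprocessor 15, so the commit adds a general MCR encoder. A hedged sketch of the sequence it enables (CRn=c7, CRm=c10, opc2=5 is the architectural barrier operation and the value written should be zero; ARM_MOV_REG_IMM8 is assumed to be defined elsewhere in this header):

#include "arm-codegen.h"

static void emit_memory_barrier (void)
{
	unsigned char buf [16], *code = buf;
	ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);       /* mov r0, #0 */
	ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);  /* mcr p15, 0, r0, c7, c10, 5 */
}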
--- arm/arm-codegen.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index e7dc99f..2460fc4 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -1084,6 +1084,16 @@ typedef union { #define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff))) #define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL) +/* MCR */ +#define ARM_DEF_MCR_COND(coproc, opc1, rt, crn, crm, opc2, cond) \ + ARM_DEF_COND ((cond)) | ((0xe << 24) | (((opc1) & 0x7) << 21) | (0 << 20) | (((crn) & 0xf) << 16) | (((rt) & 0xf) << 12) | (((coproc) & 0xf) << 8) | (((opc2) & 0x7) << 5) | (1 << 4) | (((crm) & 0xf) << 0)) + +#define ARM_MCR_COND(p, coproc, opc1, rt, crn, crm, opc2, cond) \ + ARM_EMIT(p, ARM_DEF_MCR_COND ((coproc), (opc1), (rt), (crn), (crm), (opc2), (cond))) + +#define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \ + ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL) + #ifdef __cplusplus } #endif -- cgit v1.1 From 4c9723aa3efac03bc33deed252ebda71cbb1ae86 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 8 Mar 2011 12:14:52 +0100 Subject: Fix some warnings. --- amd64/amd64-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 857c7ab..9dd3269 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -214,7 +214,7 @@ typedef union { *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ - } else if ((reg) == X86_EAX) { \ + } else if ((reg) == AMD64_RAX) { \ amd64_emit_rex(inst, size, 0, 0, 0); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ x86_imm_emit32 ((inst), (imm)); \ -- cgit v1.1 From d093f6fff2bcaa4ccfc795354b151c7ca1a0c613 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Fri, 6 May 2011 12:52:19 -0400 Subject: Implement soft debugger for s390x and fix context macro for s390x --- s390x/s390x-codegen.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 26411b4..7f74e3d 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -335,7 +335,7 @@ typedef struct { typedef struct { char op1; - char r1 : 4; + char m1 : 4; char op2 : 4; short i2; } RI_Format; @@ -726,6 +726,7 @@ typedef struct { #define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2) #define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) #define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_nop(c) S390_RR(c, 0x07, 0x0, 0) #define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) #define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) #define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d) -- cgit v1.1 From d2a95b8feb24584dd528b3deb0f5f1ec5d7766a3 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 23 Jun 2011 21:33:43 +0200 Subject: Fix out-of-tree builds on arm. 
--- arm/arm-codegen.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 2460fc4..cb3753d 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -948,8 +948,7 @@ typedef struct { - -#include "arm_dpimacros.h" +#include "mono/arch/arm/arm_dpimacros.h" #define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0) -- cgit v1.1 From 8034d4b8f49485babcbffd12d3e09fd372c00ccb Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 6 Jul 2011 16:16:16 +0200 Subject: Prefix ARM FPA codegen macros with 'FPA'. --- arm/arm-fpa-codegen.h | 46 +++++++++++++++++++++++----------------------- arm/fpam_macros.th | 8 ++++---- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/arm/arm-fpa-codegen.h b/arm/arm-fpa-codegen.h index 37653b0..58ed6ca 100644 --- a/arm/arm-fpa-codegen.h +++ b/arm/arm-fpa-codegen.h @@ -99,25 +99,25 @@ enum { ARM_DEF_COND(cond) /* FP load and stores */ -#define ARM_LDFS_COND(p,freg,base,offset,cond) \ +#define ARM_FPA_LDFS_COND(p,freg,base,offset,cond) \ ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset))) -#define ARM_LDFS(p,freg,base,offset) \ - ARM_LDFS_COND(p,freg,base,offset,ARMCOND_AL) +#define ARM_FPA_LDFS(p,freg,base,offset) \ + ARM_FPA_LDFS_COND(p,freg,base,offset,ARMCOND_AL) -#define ARM_LDFD_COND(p,freg,base,offset,cond) \ +#define ARM_FPA_LDFD_COND(p,freg,base,offset,cond) \ ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset))) -#define ARM_LDFD(p,freg,base,offset) \ - ARM_LDFD_COND(p,freg,base,offset,ARMCOND_AL) +#define ARM_FPA_LDFD(p,freg,base,offset) \ + ARM_FPA_LDFD_COND(p,freg,base,offset,ARMCOND_AL) -#define ARM_STFS_COND(p,freg,base,offset,cond) \ +#define ARM_FPA_STFS_COND(p,freg,base,offset,cond) \ ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset))) -#define ARM_STFS(p,freg,base,offset) \ - ARM_STFS_COND(p,freg,base,offset,ARMCOND_AL) +#define ARM_FPA_STFS(p,freg,base,offset) \ + ARM_FPA_STFS_COND(p,freg,base,offset,ARMCOND_AL) -#define ARM_STFD_COND(p,freg,base,offset,cond) \ +#define ARM_FPA_STFD_COND(p,freg,base,offset,cond) \ ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset))) -#define ARM_STFD(p,freg,base,offset) \ - ARM_STFD_COND(p,freg,base,offset,ARMCOND_AL) +#define ARM_FPA_STFD(p,freg,base,offset) \ + ARM_FPA_STFD_COND(p,freg,base,offset,ARMCOND_AL) #define ARM_DEF_FPA_CPDO_MONADIC(cond,op,dreg,sreg,round,prec) \ (1 << 8) | (14 << 24) | \ @@ -159,34 +159,34 @@ enum { #include "arm_fpamacros.h" -#define ARM_RNDDZ_COND(p,dreg,sreg,cond) \ +#define ARM_FPA_RNDDZ_COND(p,dreg,sreg,cond) \ ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_ZERO,ARM_FPA_ROUND_DOUBLE)) -#define ARM_RNDDZ(p,dreg,sreg) ARM_RNDD_COND(p,dreg,sreg,ARMCOND_AL) +#define ARM_FPA_RNDDZ(p,dreg,sreg) ARM_FPA_RNDD_COND(p,dreg,sreg,ARMCOND_AL) /* compares */ -#define ARM_FCMP_COND(p,op,sreg1,sreg2,cond) \ +#define ARM_FPA_FCMP_COND(p,op,sreg1,sreg2,cond) \ ARM_EMIT(p, ARM_DEF_FPA_CMP(cond,op,sreg1,sreg2)) -#define ARM_FCMP(p,op,sreg1,sreg2) ARM_FCMP_COND(p,op,sreg1,sreg2,ARMCOND_AL) +#define ARM_FPA_FCMP(p,op,sreg1,sreg2) ARM_FPA_FCMP_COND(p,op,sreg1,sreg2,ARMCOND_AL) /* coprocessor register transfer */ -#define ARM_FLTD(p,fn,rd) \ +#define ARM_FPA_FLTD(p,fn,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_DOUBLE,ARM_FPA_ROUND_NEAREST)) -#define ARM_FLTS(p,fn,rd) \ +#define 
ARM_FPA_FLTS(p,fn,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_SINGLE,ARM_FPA_ROUND_NEAREST)) -#define ARM_FIXZ(p,rd,fm) \ +#define ARM_FPA_FIXZ(p,rd,fm) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FIX,0,(fm),(rd),0,ARM_FPA_ROUND_ZERO)) -#define ARM_WFS(p,rd) \ +#define ARM_FPA_WFS(p,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) -#define ARM_RFS(p,rd) \ +#define ARM_FPA_RFS(p,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) -#define ARM_WFC(p,rd) \ +#define ARM_FPA_WFC(p,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) -#define ARM_RFC(p,rd) \ +#define ARM_FPA_RFC(p,rd) \ ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) #endif /* __MONO_ARM_FPA_CODEGEN_H__ */ diff --git a/arm/fpam_macros.th b/arm/fpam_macros.th index 914105e..15183c3 100644 --- a/arm/fpam_macros.th +++ b/arm/fpam_macros.th @@ -3,12 +3,12 @@ /* Fd := Rm */ -#define ARM_D_COND(p,dreg,sreg,cond) \ +#define ARM_FPA_D_COND(p,dreg,sreg,cond) \ ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_D(p,dreg,sreg) ARM_D_COND(p,dreg,sreg,ARMCOND_AL) +#define ARM_FPA_D(p,dreg,sreg) ARM_FPA_D_COND(p,dreg,sreg,ARMCOND_AL) -#define ARM_S_COND(p,dreg,sreg,cond) \ +#define ARM_FPA_S_COND(p,dreg,sreg,cond) \ ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_S(p,dreg,sreg) ARM_S_COND(p,dreg,sreg,ARMCOND_AL) +#define ARM_FPA_S(p,dreg,sreg) ARM_FPA_S_COND(p,dreg,sreg,ARMCOND_AL) -- cgit v1.1 From c6d53e16991eb2dcc3e4d99a008fdd899d2b78f2 Mon Sep 17 00:00:00 2001 From: Elijah Taylor Date: Fri, 5 Aug 2011 17:02:45 +0200 Subject: Fix up bugs in x86-codegen for NaCl. 
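The FPA rename above is mechanical: every caller keeps its operands and only gains the ARM_FPA_ prefix. A before/after sketch (the register numbers, zero offset, and include paths are illustrative assumptions):

#include <glib.h>
#include "mono/arch/arm/arm-codegen.h"
#include "mono/arch/arm/arm-fpa-codegen.h"

/* Spill and reload FPA register f0 through [r0]; each call was
 * previously spelled without the ARM_FPA_ prefix. */
static guint8 *
fpa_spill_f0 (guint8 *code)
{
	ARM_FPA_STFD (code, 0, ARMREG_R0, 0);	/* was: ARM_STFD (code, 0, ARMREG_R0, 0) */
	ARM_FPA_LDFD (code, 0, ARMREG_R0, 0);	/* was: ARM_LDFD (code, 0, ARMREG_R0, 0) */
	return code;
}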
--- x86/x86-codegen.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ff79b52..fd2c528 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -524,6 +524,7 @@ typedef union { #define x86_rdtsc(inst) \ do { \ + x86_codegen_pre(&(inst), 2); \ *(inst)++ = 0x0f; \ *(inst)++ = 0x31; \ } while (0) @@ -584,7 +585,7 @@ typedef union { #define x86_xadd_reg_reg(inst,dreg,reg,size) \ do { \ - x86_codegen_pre(&(inst), 4); \ + x86_codegen_pre(&(inst), 3); \ *(inst)++ = (unsigned char)0x0F; \ if ((size) == 1) \ *(inst)++ = (unsigned char)0xC0; \ @@ -670,14 +671,14 @@ typedef union { #define x86_neg_mem(inst,mem) \ do { \ - x86_codegen_pre(&(inst), 2); \ + x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0xf7; \ x86_mem_emit ((inst), 3, (mem)); \ } while (0) #define x86_neg_membase(inst,basereg,disp) \ do { \ - x86_codegen_pre(&(inst), 6); \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xf7; \ x86_membase_emit ((inst), 3, (basereg), (disp)); \ } while (0) @@ -881,11 +882,11 @@ typedef union { #define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \ do { \ if ((imm) == 1) { \ - x86_codegen_pre(&(inst), 6); \ + x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xd1; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ } else { \ - x86_codegen_pre(&(inst), 7); \ + x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \ *(inst)++ = (unsigned char)0xc1; \ x86_membase_emit ((inst), (opc), (basereg), (disp)); \ x86_imm_emit8 ((inst), (imm)); \ -- cgit v1.1 From 96e5ba7724999828facefb30e0982d0be6931bda Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 9 Nov 2011 01:13:16 +0100 Subject: Add support for hardfp abi on ARM. 
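The corrected x86_codegen_pre bounds in the NaCl fixes above match the worst-case instruction lengths, so each instruction can be padded to avoid straddling a 32-byte NaCl bundle. A throwaway check of that arithmetic (that kMaxMembaseEmitPadding covers ModRM + SIB + disp32 is my assumption, not quoted from the header):

#include <assert.h>

int
main (void)
{
	/* x86_neg_mem emits f7 /3 with an absolute disp32 address */
	assert (1 /* opcode */ + 1 /* ModRM */ + 4 /* disp32 */ == 6);
	/* x86_xadd_reg_reg emits 0f c0 (or 0f c1) plus one ModRM byte */
	assert (2 /* opcode */ + 1 /* ModRM */ == 3);
	/* worst-case membase operand: ModRM + SIB + disp32 */
	assert (1 + 1 + 4 == 6);
	return 0;
}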
--- arm/arm-vfp-codegen.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index 7916957..6d5c4cc 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -156,6 +156,12 @@ enum { #define ARM_FSTD(p,freg,base,offset) \ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL) +/* VSTM/VLDM */ +#define VSTMIA_COND(p,cond,rn,w,first_reg,nregs) ARM_EMIT((p), ((nregs * 2) << 0) | (0xb << 8) | (((first_reg) & 0xf) << 12) | ((rn) << 16) | (0 << 20) | ((w) << 21) | ((first_reg >> 4) << 22) | (1 << 23) | (0 << 24) | (0x6 << 25) | ((cond) << 28)) +#define VSTMIA(p,rn,w,first_reg,nregs) VSTMIA_COND((p), ARMCOND_AL, (rn), (w), (first_reg), (nregs)) +#define VLDMIA_COND(p,cond,rn,w,first_reg,nregs) ARM_EMIT((p), ((nregs * 2) << 0) | (0xb << 8) | (((first_reg) & 0xf) << 12) | ((rn) << 16) | (1 << 20) | ((w) << 21) | ((first_reg >> 4) << 22) | (1 << 23) | (0 << 24) | (0x6 << 25) | ((cond) << 28)) +#define VLDMIA(p,rn,w,first_reg,nregs) VLDMIA_COND((p), ARMCOND_AL, (rn), (w), (first_reg), (nregs)) + #include "arm_vfpmacros.h" /* coprocessor register transfer */ -- cgit v1.1 From aaae806b8bd16a82937c9417689aeb82bea0b952 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Wed, 9 Nov 2011 10:25:48 -0500 Subject: Update two days' worth of copyrights, many more missing --- arm/arm-vfp-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index 6d5c4cc..2e297bc 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -1,3 +1,7 @@ +// +// Copyright 2011 Xamarin Inc +// + #ifndef __MONO_ARM_VFP_CODEGEN_H__ #define __MONO_ARM_VFP_CODEGEN_H__ -- cgit v1.1 From 32a164a381080aee3afa42ea33e31d89579519a4 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 16 Nov 2011 04:35:31 -0500 Subject: Revert "Add support for hardfp abi on ARM." This reverts commit e7055b45b9211fb20021997f7da0fa24992421f5. --- arm/arm-vfp-codegen.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index 2e297bc..8056f7b 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -160,12 +160,6 @@ enum { #define ARM_FSTD(p,freg,base,offset) \ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL) -/* VSTM/VLDM */ -#define VSTMIA_COND(p,cond,rn,w,first_reg,nregs) ARM_EMIT((p), ((nregs * 2) << 0) | (0xb << 8) | (((first_reg) & 0xf) << 12) | ((rn) << 16) | (0 << 20) | ((w) << 21) | ((first_reg >> 4) << 22) | (1 << 23) | (0 << 24) | (0x6 << 25) | ((cond) << 28)) -#define VSTMIA(p,rn,w,first_reg,nregs) VSTMIA_COND((p), ARMCOND_AL, (rn), (w), (first_reg), (nregs)) -#define VLDMIA_COND(p,cond,rn,w,first_reg,nregs) ARM_EMIT((p), ((nregs * 2) << 0) | (0xb << 8) | (((first_reg) & 0xf) << 12) | ((rn) << 16) | (1 << 20) | ((w) << 21) | ((first_reg >> 4) << 22) | (1 << 23) | (0 << 24) | (0x6 << 25) | ((cond) << 28)) -#define VLDMIA(p,rn,w,first_reg,nregs) VLDMIA_COND((p), ARMCOND_AL, (rn), (w), (first_reg), (nregs)) - #include "arm_vfpmacros.h" /* coprocessor register transfer */ -- cgit v1.1 From d711efe0d6403fa49697c304696843a789805112 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 2 Dec 2011 06:20:16 +0000 Subject: Ongoing MIPS work. Fix mips_load () to be patchable, fix endianness issue in OP_MIPS_MFC1D, fix OP_JMP. "make rcheck" runs now. 
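The mips_load () hunk below always emits the lui/addiu pair, even for constants that would fit a single instruction, precisely so that a later pass can rewrite the constant in place. An illustrative patcher under that assumption (not mono's actual patch routine):

#include <glib.h>

/* Rewrite the 32-bit constant inside a lui/addiu pair emitted by
 * mips_load ().  addiu sign-extends its immediate, so the high half
 * is biased by one when bit 15 of the target is set, mirroring the
 * emitter.  Works only because the pair is now always two words. */
static void
mips_patch_load_sketch (guint32 *code, guint32 target)
{
	guint32 hi = (target >> 16) + ((target >> 15) & 1);
	code [0] = (code [0] & 0xffff0000) | (hi & 0xffff);	/* lui   */
	code [1] = (code [1] & 0xffff0000) | (target & 0xffff);	/* addiu */
}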
--- mips/mips-codegen.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h index 20ad367..dc4df7d 100644 --- a/mips/mips-codegen.h +++ b/mips/mips-codegen.h @@ -197,17 +197,13 @@ enum { /* Load always using lui/addiu pair (for later patching) */ #define mips_load(c,D,v) do { \ - if (!mips_is_imm16 ((v))) { \ - if (((guint32)(v)) & (1 << 15)) { \ - mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \ - } \ - else { \ - mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \ - } \ - mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \ - } \ - else \ - mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \ + if (((guint32)(v)) & (1 << 15)) { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \ + } \ + else { \ + mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \ + } \ + mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \ } while (0) /* load constant - no patch-up */ -- cgit v1.1 From c565eab0f9d79f6009c3878eaa190529838b0204 Mon Sep 17 00:00:00 2001 From: Miguel de Icaza Date: Mon, 12 Mar 2012 16:15:46 -0400 Subject: Update some copyrights --- arm/arm-codegen.h | 4 +++- arm/arm-fpa-codegen.h | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index cb3753d..31c4575 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -1,6 +1,8 @@ /* * arm-codegen.h - * Copyright (c) 2002 Sergey Chaban + * Copyright (c) 2002-2003 Sergey Chaban + * Copyright 2005-2011 Novell Inc + * Copyright 2011 Xamarin Inc */ diff --git a/arm/arm-fpa-codegen.h b/arm/arm-fpa-codegen.h index 58ed6ca..4389a5e 100644 --- a/arm/arm-fpa-codegen.h +++ b/arm/arm-fpa-codegen.h @@ -1,3 +1,8 @@ +/* + * Copyright 2005 Novell Inc + * Copyright 2011 Xamarin Inc + */ + #ifndef __MONO_ARM_FPA_CODEGEN_H__ #define __MONO_ARM_FPA_CODEGEN_H__ -- cgit v1.1 From 33426abe6bd7ad8eb37d2f214afe08a0a3d70a0b Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Mon, 2 Apr 2012 13:30:43 -0400 Subject: s390x-codegen.h - Define S390_SP and S390_FP sgen-major-copy-object.h - Correct assertion test sgen-os-posix.c - Prevent race condition between restarting and suspending a thread --- s390x/s390x-codegen.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 7f74e3d..d3292bf 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -146,6 +146,8 @@ typedef enum { #define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095) #define STK_BASE s390_r15 +#define S390_SP s390_r15 +#define S390_FP s390_r11 #define S390_MINIMAL_STACK_SIZE 160 #define S390_REG_SAVE_OFFSET 48 #define S390_PARM_SAVE_OFFSET 16 -- cgit v1.1 From a841c76b86e38fc8e5db24f152b5fab2501ddf1a Mon Sep 17 00:00:00 2001 From: Iain Lane Date: Sun, 15 Apr 2012 14:49:55 +0100 Subject: Fix ARM printf format problems When building with -Werror=format-security on ARM, mono fails to build due to incorrect format strings in arm-dis.c --- arm/arm-dis.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arm/arm-dis.c b/arm/arm-dis.c index 0a478bc..5074f26 100644 --- a/arm/arm-dis.c +++ b/arm/arm-dis.c @@ -95,7 +95,7 @@ void dump_reg(ARMDis* dis, int reg) { if (!use_reg_alias || (reg > 3 && reg < 11)) { fprintf(dis->dis_out, "r%d", reg); } else { - fprintf(dis->dis_out, reg_alias[reg]); + fprintf(dis->dis_out, "%s", reg_alias[reg]); } } @@ -137,7 +137,7 @@ void dump_reglist(ARMDis* dis, int reg_list) { void dump_br(ARMDis* dis, ARMInstr i) { - 
fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %p", + fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %#x", (i.br.link == 1) ? "l" : "", cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6)); } @@ -376,7 +376,7 @@ void dump_swi(ARMDis* dis, ARMInstr i) { void dump_clz(ARMDis* dis, ARMInstr i) { - fprintf(dis->dis_out, "clz%s\t"); + fprintf(dis->dis_out, "clz\t"); dump_reg(dis, i.clz.rd); fprintf(dis->dis_out, ", "); dump_reg(dis, i.clz.rm); -- cgit v1.1 From f2e43c392dde726d2f1008dfcc8515d34354e968 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 19 Sep 2012 01:37:26 +0000 Subject: Save/restore fp registers in MonoContext on ios. Fixes #1949. --- arm/arm-vfp-codegen.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index 8056f7b..b3f1dce 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -160,6 +160,12 @@ enum { #define ARM_FSTD(p,freg,base,offset) \ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL) +#define ARM_FLDMD_COND(p,first_reg,nregs,base,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_LDR,0,(base),(first_reg),((nregs) * 2) << 2)) + +#define ARM_FLDMD(p,first_reg,nregs,base) \ + ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL) + #include "arm_vfpmacros.h" /* coprocessor register transfer */ -- cgit v1.1 From 0b64268e0a56e3f76063f0b679975be0daaf68b1 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Wed, 3 Oct 2012 10:26:37 +0200 Subject: Use AM_CPPFLAGS instead of INCLUDES in Makefile.am files, as the latter is no longer supported, see http://lists.gnu.org/archive/html/automake/2012-08/msg00087.html. --- Makefile.am | 2 +- alpha/Makefile.am | 2 +- amd64/Makefile.am | 2 +- arm/Makefile.am | 2 +- hppa/Makefile.am | 2 +- mips/Makefile.am | 2 +- ppc/Makefile.am | 2 +- s390/Makefile.am | 2 +- s390x/Makefile.am | 2 +- sparc/Makefile.am | 2 +- x86/Makefile.am | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile.am b/Makefile.am index 0960416..28a9147 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,6 +1,6 @@ DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 mips -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) if INTERP_SUPPORTED SUBDIRS = $(arch_target) diff --git a/alpha/Makefile.am b/alpha/Makefile.am index 8e0accf..86cbcb6 100644 --- a/alpha/Makefile.am +++ b/alpha/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-alpha.la diff --git a/amd64/Makefile.am b/amd64/Makefile.am index 54499b5..3c72826 100644 --- a/amd64/Makefile.am +++ b/amd64/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-amd64.la diff --git a/arm/Makefile.am b/arm/Makefile.am index 180be53..86784c0 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-arm.la diff --git a/hppa/Makefile.am b/hppa/Makefile.am index 7e671cd..1d608ad 100644 --- a/hppa/Makefile.am +++ b/hppa/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-hppa.la diff --git a/mips/Makefile.am b/mips/Makefile.am index c272d04..1063365 100644 --- a/mips/Makefile.am +++ b/mips/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = 
$(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-mips.la diff --git a/ppc/Makefile.am b/ppc/Makefile.am index b013d21..a4e2d5d 100644 --- a/ppc/Makefile.am +++ b/ppc/Makefile.am @@ -1,6 +1,6 @@ if INTERP_SUPPORTED -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-ppc.la diff --git a/s390/Makefile.am b/s390/Makefile.am index 1c62a88..d8ebb6f 100644 --- a/s390/Makefile.am +++ b/s390/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-s390.la diff --git a/s390x/Makefile.am b/s390x/Makefile.am index e7466d9..ce7f470 100644 --- a/s390x/Makefile.am +++ b/s390x/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-s390x.la diff --git a/sparc/Makefile.am b/sparc/Makefile.am index e0f7689..a888904 100644 --- a/sparc/Makefile.am +++ b/sparc/Makefile.am @@ -1,5 +1,5 @@ -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-sparc.la diff --git a/x86/Makefile.am b/x86/Makefile.am index ab4c142..9778237 100644 --- a/x86/Makefile.am +++ b/x86/Makefile.am @@ -1,6 +1,6 @@ if INTERP_SUPPORTED -INCLUDES = $(GLIB_CFLAGS) -I$(top_srcdir) +AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-x86.la -- cgit v1.1 From 600580c96563f5702acee5a0307432e96731d837 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Thu, 4 Oct 2012 13:03:06 +0200 Subject: Save fp registers in the ARM throw trampoline; ios has callee-saved fp registers, and LLVM generates code which uses them. --- arm/arm-vfp-codegen.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index b3f1dce..d0fdb29 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -166,6 +166,12 @@ enum { #define ARM_FLDMD(p,first_reg,nregs,base) \ ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL) +#define ARM_FSTMD_COND(p,first_reg,nregs,base,cond) \ + ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_STR,0,(base),(first_reg),((nregs) * 2) << 2)) + +#define ARM_FSTMD(p,first_reg,nregs,base) \ + ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL) + #include "arm_vfpmacros.h" /* coprocessor register transfer */ -- cgit v1.1 From 9c434db79ba98565a8dadcfbbe8737621a698589 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 9 Oct 2012 17:23:38 -0400 Subject: Use full path for includes as this was breaking the cross compiler. --- arm/arm-vfp-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index d0fdb29..c4c5e3e 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -172,7 +172,7 @@ enum { #define ARM_FSTMD(p,first_reg,nregs,base) \ ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL) -#include "arm_vfpmacros.h" +#include <mono/arch/arm/arm_vfpmacros.h> /* coprocessor register transfer */ #define ARM_FMSR(p,freg,reg) \ -- cgit v1.1 From ddee8bb5125ad07f673a5f9a45ddc629dec8c126 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 26 Feb 2013 22:08:26 +0100 Subject: Remove the unmaintained and incomplete hppa backend. 
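A sketch of how the ARM_FSTMD/ARM_FLDMD emitters added above would bracket a call in a throw trampoline. The first_reg value (8, i.e. d8, the first callee-saved double) and the base register are assumptions for illustration, not taken from mono's trampoline code:

#include <glib.h>
#include "mono/arch/arm/arm-codegen.h"
#include "mono/arch/arm/arm-vfp-codegen.h"

/* Store d8-d15 at [r0], then reload them after the clobbering code. */
static guint8 *
save_restore_vfp (guint8 *code)
{
	ARM_FSTMD (code, 8, 8, ARMREG_R0);
	/* ... code that may clobber the VFP registers ... */
	ARM_FLDMD (code, 8, 8, ARMREG_R0);
	return code;
}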
--- Makefile.am | 2 +- hppa/.gitignore | 3 - hppa/Makefile.am | 7 - hppa/hppa-codegen.h | 737 ------------------------------------------------- hppa/tramp.c | 781 ---------------------------------------------------- 5 files changed, 1 insertion(+), 1529 deletions(-) delete mode 100644 hppa/.gitignore delete mode 100644 hppa/Makefile.am delete mode 100644 hppa/hppa-codegen.h delete mode 100644 hppa/tramp.c diff --git a/Makefile.am b/Makefile.am index 28a9147..2cfec09 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha hppa amd64 ia64 mips +DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha amd64 ia64 mips AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/hppa/.gitignore b/hppa/.gitignore deleted file mode 100644 index dc1ebd2..0000000 --- a/hppa/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/Makefile -/Makefile.in -/.deps diff --git a/hppa/Makefile.am b/hppa/Makefile.am deleted file mode 100644 index 1d608ad..0000000 --- a/hppa/Makefile.am +++ /dev/null @@ -1,7 +0,0 @@ - -AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-hppa.la - -libmonoarch_hppa_la_SOURCES = tramp.c hppa-codegen.h - diff --git a/hppa/hppa-codegen.h b/hppa/hppa-codegen.h deleted file mode 100644 index c03a9ef..0000000 --- a/hppa/hppa-codegen.h +++ /dev/null @@ -1,737 +0,0 @@ -#ifndef _HPPA_CODEGEN_H_ -#define _HPPA_CODEGEN_H_ - -typedef enum { - hppa_r0 = 0, - hppa_r1, - hppa_r2, - hppa_rp = hppa_r2, - hppa_r3, - hppa_r4, - hppa_r5, - hppa_r6, - hppa_r7, - hppa_r8, - hppa_r9, - hppa_r10, - hppa_r11, - hppa_r12, - hppa_r13, - hppa_r14, - hppa_r15, - hppa_r16, - hppa_r17, - hppa_r18, - hppa_r19, - hppa_r20, - hppa_r21, - hppa_r22, - hppa_r23, - hppa_r24, - hppa_r25, - hppa_r26, - hppa_r27, - hppa_r28, - hppa_r29, - hppa_ap = hppa_r29, - hppa_r30, - hppa_sp = hppa_r30, - hppa_r31 -} HPPAIntRegister; - -typedef enum { - hppa_fr0, - hppa_fr1, - hppa_fr2, - hppa_fr3, - hppa_fr4, - hppa_fr5, - hppa_fr6, - hppa_fr7, - hppa_fr8, - hppa_fr9, - hppa_fr10, - hppa_fr11, - hppa_fr12, - hppa_fr13, - hppa_fr14, - hppa_fr15, - hppa_fr16, - hppa_fr17, - hppa_fr18, - hppa_fr19, - hppa_fr20, - hppa_fr21, - hppa_fr22, - hppa_fr23, - hppa_fr24, - hppa_fr25, - hppa_fr26, - hppa_fr27, - hppa_fr28, - hppa_fr29, - hppa_fr30, - hppa_fr31 -} HPPAFloatRegister; - -#define hppa_opcode(op) ((op) << 26) -#define hppa_opcode_alu(op1, op2) (((op1) << 26) | ((op2) << 6)) -#define hppa_op_r1(r) ((r) << 21) -#define hppa_op_r2(r) ((r) << 16) -#define hppa_op_r3(r) (r) - -/* imm5, imm11 and imm14 are encoded by putting the sign bit in the LSB */ -#define hppa_op_imm5(im5) ((((im5) & 0xf) << 1) | (((int)(im5)) < 0)) -#define hppa_op_imm11(im11) ((((im11) & 0x3ff) << 1) | (((int)(im11)) < 0)) -#define hppa_op_imm14(im14) ((((im14) & 0x1fff) << 1) | (((int)(im14)) < 0)) - -/* HPPA uses "selectors" for some operations. 
The two we need are L% and R% */ -/* lsel: select left 21 bits */ -#define hppa_lsel(v) (((int)(v))>>11) -/* rsel: select right 11 bits */ -#define hppa_rsel(v) (((int)(v))&0x7ff) - -/* imm12 is used by the conditional branch insns - * w1 (bits [2..12]) - * w (bit 0) - * value = assemble_12(w1,w) = cat(w,w1{10},w1{0..9}) - * (note PA bit numbering) - * - * if the original number is: - * abcdefghijkl - * - * 3 2 1 0 - * 10987654321098765432109876543210 - * cdefghijklb a - */ -static inline int hppa_op_imm12(int im12) -{ - unsigned int a = im12 < 0; - unsigned int b = (im12 >> 10) & 0x1; - unsigned int cdefghijkl = im12 & 0x3ff; - - return (cdefghijkl << 3) | (b << 2) | a; -} - -/* - * imm17 is used by the BL insn, which has - * w1 (bits [16..20]) - * w2 (bits [2..12]) - * w (bit 0) - * value = assemble_17(w1,w2,w) = cat(w,w1,w2{10},w2{0..9}) - * (note PA bit numbering) - * - * if the original number is: - * abcdefghijklmnopq - * - * 3 2 1 0 - * 10987654321098765432109876543210 - * bcdef hijklmnopqg a - */ -static inline int hppa_op_imm17(int im17) -{ - unsigned int a = im17 < 0; - unsigned int bcdef = (im17 >> 11) & 0x1f; - unsigned int g = (im17 >> 10) & 0x1; - unsigned int hijklmnopq = im17 & 0x3ff; - - return (bcdef << 16) | (hijklmnopq << 3) | (g << 2) | a; -} - -/* imm21 is used by addil and ldil - * - * value = assemble_21(x) = cat(x{20},x{9..19},x{5..6},x{0..4},x{7..8}) - * (note PA bit numbering) - * - * if the original number is: - * abcdefghijklmnopqrstu - * - * 3 2 1 0 - * 10987654321098765432109876543210 - * opqrsmntubcdefghijkla - */ -static inline int hppa_op_imm21(int im21) -{ - unsigned int a = im21 < 0; - unsigned int bcdefghijkl = (im21 >> 9) & 0x7ff; - unsigned int mn = (im21 >> 7) & 0x3; - unsigned int opqrs = (im21 >> 2) & 0x1f; - unsigned int tu = im21 & 0x3; - - return (opqrs << 16) | (mn << 14) | (tu << 12) | (bcdefghijkl << 1) | a; -} - -/* returns 1 if VAL can fit in BITS */ -static inline int hppa_check_bits(int val, int bits) -{ - /* positive offset */ - if (!(val & (1 << (bits - 1))) && (val >> bits) != 0) - return 0; - /* negative offset */ - if ((val & (1 << (bits - 1))) && ((val >> bits) != (-1 >>(bits+2)))) - return 0; - return 1; -} - -static inline void *hppa_emit(void *inp, unsigned int insn) -{ - unsigned int *code = inp; - *code = insn; - return ((char *)code) + 4; -} - -/* Table 5-3: Compare conditons */ -#define HPPA_CMP_COND_NEVER (0) -#define HPPA_CMP_COND_EQ (1) -#define HPPA_CMP_COND_SLT (2) -#define HPPA_CMP_COND_SLE (3) -#define HPPA_CMP_COND_ULT (4) -#define HPPA_CMP_COND_ULE (5) -#define HPPA_CMP_COND_OV (6) -#define HPPA_CMP_COND_ODD (7) - -/* Table 5-3: Subtaction conditions */ -#define HPPA_SUB_COND_NEVER ((0 << 1) | 0) -#define HPPA_SUB_COND_EQ ((1 << 1) | 0) -#define HPPA_SUB_COND_SLT ((2 << 1) | 0) -#define HPPA_SUB_COND_SLE ((3 << 1) | 0) -#define HPPA_SUB_COND_ULT ((4 << 1) | 0) -#define HPPA_SUB_COND_ULE ((5 << 1) | 0) -#define HPPA_SUB_COND_SV ((6 << 1) | 0) -#define HPPA_SUB_COND_OD ((7 << 1) | 0) -#define HPPA_SUB_COND_ALWAYS ((0 << 1) | 1) -#define HPPA_SUB_COND_NE ((1 << 1) | 1) -#define HPPA_SUB_COND_SGE ((2 << 1) | 1) -#define HPPA_SUB_COND_SGT ((3 << 1) | 1) -#define HPPA_SUB_COND_UGE ((4 << 1) | 1) -#define HPPA_SUB_COND_UGT ((5 << 1) | 1) -#define HPPA_SUB_COND_NSV ((6 << 1) | 1) -#define HPPA_SUB_COND_EV ((7 << 1) | 1) - -/* Table 5-4: Addition conditions */ -#define HPPA_ADD_COND_NEVER ((0 << 1) | 0) -#define HPPA_ADD_COND_EQ ((1 << 1) | 0) -#define HPPA_ADD_COND_LT ((2 << 1) | 0) -#define HPPA_ADD_COND_LE ((3 << 1) | 0) 
-#define HPPA_ADD_COND_NUV ((4 << 1) | 0) -#define HPPA_ADD_COND_ZUV ((5 << 1) | 0) -#define HPPA_ADD_COND_SV ((6 << 1) | 0) -#define HPPA_ADD_COND_OD ((7 << 1) | 0) -#define HPPA_ADD_COND_ALWAYS ((0 << 1) | 1) -#define HPPA_ADD_COND_NE ((1 << 1) | 1) -#define HPPA_ADD_COND_GE ((2 << 1) | 1) -#define HPPA_ADD_COND_GT ((3 << 1) | 1) -#define HPPA_ADD_COND_UV ((4 << 1) | 1) -#define HPPA_ADD_COND_VNZ ((5 << 1) | 1) -#define HPPA_ADD_COND_NSV ((6 << 1) | 1) -#define HPPA_ADD_COND_EV ((7 << 1) | 1) - -/* Table 5-5: Logical instruction conditions */ -#define HPPA_LOGICAL_COND_NEVER ((0 << 1) | 0) -#define HPPA_LOGICAL_COND_ZERO ((1 << 1) | 0) -#define HPPA_LOGICAL_COND_MSB_SET ((2 << 1) | 0) -#define HPPA_LOGICAL_COND_MSB_SET_OR_ZERO ((3 << 1) | 0) -#define HPPA_LOGICAL_COND_LSB_SET ((7 << 1) | 0) -#define HPPA_LOGICAL_COND_ALWAYS ((0 << 1) | 1) -#define HPPA_LOGICAL_COND_NZ ((1 << 1) | 1) -#define HPPA_LOGICAL_COND_MSB_CLR ((2 << 1) | 1) -#define HPPA_LOGICAL_COND_MSB_CLR_AND_NZ ((3 << 1) | 1) -#define HPPA_LOGICAL_COND_LSB_CLR ((7 << 1) | 1) - -/* Table 5-6: Unit Conditions */ -#define HPPA_UNIT_COND_NEVER ((0 << 1) | 0) -#define HPPA_UNIT_COND_SBZ ((2 << 1) | 0) -#define HPPA_UNIT_COND_SHZ ((3 << 1) | 0) -#define HPPA_UNIT_COND_SDC ((4 << 1) | 0) -#define HPPA_UNIT_COND_SBC ((6 << 1) | 0) -#define HPPA_UNIT_COND_SHC ((7 << 1) | 0) -#define HPPA_UNIT_COND_ALWAYS ((0 << 1) | 1) -#define HPPA_UNIT_COND_NBZ ((2 << 1) | 1) -#define HPPA_UNIT_COND_NHZ ((3 << 1) | 1) -#define HPPA_UNIT_COND_NDC ((4 << 1) | 1) -#define HPPA_UNIT_COND_NBC ((6 << 1) | 1) -#define HPPA_UNIT_COND_NHC ((7 << 1) | 1) - -/* Table 5-7: Shift/Extract/Deposit Conditions */ -#define HPPA_BIT_COND_NEVER (0) -#define HPPA_BIT_COND_ZERO (1) -#define HPPA_BIT_COND_MSB_SET (2) -#define HPPA_BIT_COND_LSB_SET (3) -#define HPPA_BIT_COND_ALWAYS (4) -#define HPPA_BIT_COND_SOME_SET (5) -#define HPPA_BIT_COND_MSB_CLR (6) -#define HPPA_BIT_COND_LSB_CLR (7) - -#define hppa_mtsar(p, r) \ - p = hppa_emit (p, hppa_opcode(0x00) | hppa_op_r1(11) | hppa_op_r2(r) | (0xC2 << 5)) - -#define hppa_bl_full(p, n, target, t) do { \ - g_assert (hppa_check_bits (target, 17)); \ - p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(t) | hppa_op_imm17(((int)(((target) - 8)>>2))) | ((n) << 1)); \ -} while (0) - -#define hppa_bl(p, target, t) hppa_bl_full(p, 0, target, t) -#define hppa_bl_n(p, target, t) hppa_bl_full(p, 1, target, t) - -#define hppa_bv(p, x, b) \ - p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(b) | hppa_op_r2(x) | (6 << 13)) - -#define hppa_blr(p, x, t) \ - p = hppa_emit (p, hppa_opcode(0x3A) | hppa_op_r1(t) | hppa_op_r2(x) | (2 << 13)) - -/* hardcoded sr = sr4 */ -#define hppa_ble_full(p, n, d, b) \ - p = hppa_emit (p, hppa_opcode(0x39) | hppa_op_r1(b) | hppa_op_imm17(((int)(d)) >> 2) | (1 << 13) | ((n) << 1)) - -#define hppa_ble(p, d, b) hppa_ble_full(p, 0, d, b) -#define hppa_ble_n(p, d, b) hppa_ble_full(p, 1, d, b) - -#define hppa_be_full(p, n, d, b) \ - p = hppa_emit (p, hppa_opcode(0x38) | hppa_op_r1(b) | hppa_op_imm17(((int)(d)) >> 2) | (1 << 13) | ((n) << 1)) - -#define hppa_be(p, d, b) hppa_be_full(p, 0, d, b) -#define hppa_be_n(p, d, b) hppa_be_full(p, 1, d, b) - -#define hppa_bb_full(p, cond, n, r, b, t) \ - p = hppa_emit (p, hppa_opcode(0x31) | hppa_op_r1(b) | hppa_op_r2(r) | ((cond) << 13) | ((n) << 1) | hppa_op_imm12((int)(t))) - -#define hppa_bb(p, cond, r, b, t) hppa_bb_full(p, cond, 0, r, b, t) -#define hppa_bb_n(p, cond, r, b, t) hppa_bb_full(p, cond, 1, r, b, t) - - -#define hppa_movb(p, r1, r2, cond, target) do { \ - 
g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x32) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -#define hppa_movib(p, i, r, cond, target) do { \ - g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x33) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -#define hppa_combt(p, r1, r2, cond, target) do { \ - g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x20) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -#define hppa_combf(p, r1, r2, cond, target) do { \ - g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x22) | hppa_op_r1(r2) | hppa_op_r2(r1) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -#define hppa_combit(p, i, r, cond, target) do { \ - g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x21) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -#define hppa_combif(p, i, r, cond, target) do { \ - g_assert (hppa_check_bits (target, 12)); \ - p = hppa_emit (p, hppa_opcode(0x23) | hppa_op_r1(r) | (hppa_op_imm5(((int)(i))) << 16) | ((cond) << 13) | hppa_op_imm12(((int)(target)))); \ -} while (0) - -/* TODO: addbt, addbf, addbit, addbif */ - -/* Load/store insns */ -#define hppa_ld_disp(p, op, d, b, t) do { \ - g_assert (hppa_check_bits (d, 14)); \ - p = hppa_emit (p, hppa_opcode(op) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14(((int)(d)))); \ -} while (0) - -#define hppa_ldb(p, d, b, t) hppa_ld_disp(p, 0x10, d, b, t) -#define hppa_ldh(p, d, b, t) hppa_ld_disp(p, 0x11, d, b, t) -#define hppa_ldw(p, d, b, t) hppa_ld_disp(p, 0x12, d, b, t) - -#define hppa_ldwm(p, d, b, t) \ - p = hppa_emit (p, hppa_opcode(0x13) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14(d)); \ - -#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) - -#define hppa_st_disp(p, op, r, d, b) do { \ - g_assert (hppa_check_bits (d, 14)); \ - p = hppa_emit (p, hppa_opcode(op) | hppa_op_r1(b) | hppa_op_r2(r) | hppa_op_imm14(((int)(d)))); \ -} while (0) - -#define hppa_stb(p, r, d, b) hppa_st_disp(p, 0x18, r, d, b) -#define hppa_sth(p, r, d, b) hppa_st_disp(p, 0x19, r, d, b) -#define hppa_stw(p, r, d, b) hppa_st_disp(p, 0x1A, r, d, b) - -#define hppa_stwm(p, r, d, b) \ - p = hppa_emit (p, hppa_opcode(0x1B) | hppa_op_r1(b) | hppa_op_r2(r) | hppa_op_imm14(d)) - -#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) - -/* s = 0, u = 0, cc = 0, m = 0 */ -#define hppa_ld_indexed(p, op, x, b, t) \ - p = hppa_emit (p, hppa_opcode(0x03) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t) | (op << 6)) - -#define hppa_ldbx(p, x, b, t) hppa_ld_indexed(p, 0, x, b, t) -#define hppa_ldhx(p, x, b, t) hppa_ld_indexed(p, 1, x, b, t) -#define hppa_ldwx(p, x, b, t) hppa_ld_indexed(p, 2, x, b, t) - -#define hppa_ldil(p, i, t) \ - p = hppa_emit (p, hppa_opcode(0x08) | hppa_op_r1(t) | hppa_op_imm21(((int)(i)))) - -#define hppa_ldo(p, d, b, t) \ - p = hppa_emit (p, hppa_opcode(0x0D) | hppa_op_r1(b) | hppa_op_r2(t) | hppa_op_imm14((int)(d))) - -#define hppa_set(p, imm, t) do { \ - if (hppa_check_bits ((int)(imm), 14)) \ - hppa_ldo (p, (int)(imm), hppa_r0, t); \ - else { \ - hppa_ldil (p, hppa_lsel (imm), t); \ - hppa_ldo (p, hppa_rsel (imm), t, t); \ - } \ -} while (0) - -/* addil's destination is always r1 */ -#define hppa_addil(p, i, r) 
\ - p = hppa_emit (p, hppa_opcode(0x0A) | hppa_op_r1(r) | hppa_op_imm21(i)) - -#define hppa_alu_op(p, op, cond, r1, r2, t) \ - p = hppa_emit (p, hppa_opcode_alu(0x02, op) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t) | ((cond) << 12)) - -#define hppa_add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x18, cond, r1, r2, t) -#define hppa_add(p, r1, r2, t) hppa_add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x28, cond, r1, r2, t) -#define hppa_addl(p, r1, r2, t) hppa_addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x38, cond, r1, r2, t) -#define hppa_addo(p, r1, r2, t) hppa_addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_addc_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1C, cond, r1, r2, t) -#define hppa_addc(p, r1, r2, t) hppa_addc_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_addco_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3C, cond, r1, r2, t) -#define hppa_addco(p, r1, r2, t) hppa_addco_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh1add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x19, cond, r1, r2, t) -#define hppa_sh1add(p, r1, r2, t) hppa_sh1add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh1addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x29, cond, r1, r2, t) -#define hppa_sh1addl(p, r1, r2, t) hppa_sh1addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh1addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x39, cond, r1, r2, t) -#define hppa_sh1addo(p, r1, r2, t) hppa_sh1addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh2add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1A, cond, r1, r2, t) -#define hppa_sh2add(p, r1, r2, t) hppa_sh2add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh2addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2A, cond, r1, r2, t) -#define hppa_sh2addl(p, r1, r2, t) hppa_sh2addl_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh2addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3A, cond, r1, r2, t) -#define hppa_sh2addo(p, r1, r2, t) hppa_sh2addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh3add_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x1B, cond, r1, r2, t) -#define hppa_sh3add(p, r1, r2, t) hppa_sh3add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh3addl_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2B, cond, r1, r2, t) -#define hppa_sh3addl(p, r1, r2, t) hppa_add_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) -#define hppa_sh3addo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x3B, cond, r1, r2, t) -#define hppa_sh3addo(p, r1, r2, t) hppa_sh3addo_cond(p, HPPA_ADD_COND_NEVER, r1, r2, t) - -#define hppa_sub_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x10, cond, r1, r2, t) -#define hppa_sub(p, r1, r2, t) hppa_sub_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_subo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x30, cond, r1, r2, t) -#define hppa_subo(p, r1, r2, t) hppa_subo_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_subb_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x14, cond, r1, r2, t) -#define hppa_subb(p, r1, r2, t) hppa_subb_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_subbo_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x34, cond, r1, r2, t) -#define hppa_subbo(p, r1, r2, t) hppa_subbo_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_subt_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x13, cond, r1, r2, t) -#define hppa_subt(p, r1, r2, t) hppa_subt_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_subto_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x33, cond, r1, r2, t) -#define hppa_subto(p, 
r1, r2, t) hppa_subto_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_ds_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x11, cond, r1, r2, t) -#define hppa_ds(p, r1, r2, t) hppa_ds_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) -#define hppa_comclr_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x22, cond, r1, r2, t) -#define hppa_comclr(p, r1, r2, t) hppa_comclr_cond(p, HPPA_SUB_COND_NEVER, r1, r2, t) - -#define hppa_or_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x09, cond, r1, r2, t) -#define hppa_or(p, r1, r2, t) hppa_or_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) -#define hppa_copy(p, r1, r2) hppa_or(p, r1, hppa_r0, r2) -#define hppa_nop(p) hppa_or(p, hppa_r0, hppa_r0, hppa_r0) -#define hppa_xor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x0A, cond, r1, r2, t) -#define hppa_xor(p, r1, r2, t) hppa_xor_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) -#define hppa_and_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x08, cond, r1, r2, t) -#define hppa_and(p, r1, r2, t) hppa_and_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) -#define hppa_andcm_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x00, cond, r1, r2, t) -#define hppa_andcm(p, r1, r2, t) hppa_andcm_cond(p, HPPA_LOGICAL_COND_NEVER, r1, r2, t) - -#define hppa_uxor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x0E, cond, r1, r2, t) -#define hppa_uxor(p, r1, r2, t) hppa_uxor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) -#define hppa_uaddcm_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x26, cond, r1, r2, t) -#define hppa_uaddcm(p, r1, r2, t) hppa_uaddcm_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) -#define hppa_uaddcmt_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x27, cond, r1, r2, t) -#define hppa_uaddcmt(p, r1, r2, t) hppa_uaddcmt_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) -#define hppa_dcor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2E, cond, r1, r2, t) -#define hppa_dcor(p, r1, r2, t) hppa_dcor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) -#define hppa_idcor_cond(p, cond, r1, r2, t) hppa_alu_op(p, 0x2F, cond, r1, r2, t) -#define hppa_idcor(p, r1, r2, t) hppa_idcor_cond(p, HPPA_UNIT_COND_NEVER, r1, r2, t) - -#define hppa_addi(p, i, r, t) \ - p = hppa_emit (p, hppa_opcode(0x2D) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) - -#define hppa_subi(p, i, r, t) \ - p = hppa_emit (p, hppa_opcode(0x25) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) - -#define hppa_not(p, r, t) hppa_subi(p, -1, r, t) - -#define hppa_comiclr(p, i, r, t) \ - p = hppa_emit (p, hppa_opcode(0x24) | hppa_op_r1(r) | hppa_op_r2(t) | hppa_op_imm11(((int)(i)))) - -#define hppa_vshd(p, r1, r2, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t)) - -/* shift is a register */ -#define hppa_lshr(p, r, shift, t) \ - do { \ - hppa_mtsar(p, shift); \ - hppa_vshd(p, hppa_r0, r, t); \ - } while (0) - -/* shift is a constant */ -#define hppa_shd(p, r1, r2, shift, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r2) | hppa_op_r2(r1) | hppa_op_r3(t) | (2 << 10) | ((31 - (shift)) << 5)) - -#define hppa_vextru(p, r, len, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (4 << 10) | (32 - (len))) - -#define hppa_vextrs(p, r, len, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (5 << 10) | (32 - (len))) - -/* shift is a register */ -#define hppa_shr(p, r, shift, t) \ - do { \ - hppa_subi(p, 31, shift, t); \ - hppa_mtsar(p, t); \ - hppa_vextrs(p, r, 32, t); \ - } while (0) - -/* shift is a constant */ -#define hppa_extru(p, r, shift, len, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (6 << 10) 
| ((shift) << 5) | (32 - (len))) - -#define hppa_extrs(p, r, shift, len, t) \ - p = hppa_emit (p, hppa_opcode(0x34) | hppa_op_r1(r) | hppa_op_r2(t) | (7 << 10) | ((shift) << 5) | (32 - (len))) - -#define hppa_vdep(p, r, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(r) | hppa_op_r2(t) | (1 << 10) | (32 - (len))) - -#define hppa_dep(p, r, pos, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (3 << 10) | ((31 - (pos)) << 5) | (32 - (len))) - -#define hppa_vdepi(p, i, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (5 << 10) | (32 - (len))) - -#define hppa_depi(p, i, pos, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (7 << 10) | ((31 - (pos)) << 5) | (32 - (len))) - -#define hppa_zvdep(p, r, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (0 << 10) | (32 - (len))) - -/* shift is a register */ -#define hppa_shl(p, r, shift, t) \ - do { \ - hppa_subi(p, 31, shift, t); \ - hppa_mtsar(p, t); \ - hppa_zvdep(p, r, 32, t); \ - } while (0) - -#define hppa_zdep(p, r, pos, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | hppa_op_r2(r) | (2 << 10) | ((31 - (pos)) << 5) | (32 - (len))) - -#define hppa_zvdepi(p, i, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (4 << 10) | (32 - (len))) - -#define hppa_zdepi(p, i, pos, len, t) \ - p = hppa_emit (p, hppa_opcode(0x35) | hppa_op_r1(t) | (hppa_op_imm5(((int)(i))) << 16) | (6 << 10) | ((31 - (pos)) << 5) | (32 - (len))) - -/* FPU insns */ -/* These are valid for op == 0x0C only, for op == 0x0E there is an extra bit for - * r and t */ -#define hppa_fpu_class0(p, r, sub, fmt, t) \ - p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r) | hppa_op_r3(t) | ((sub) << 13) | ((fmt) << 11)) - -#define hppa_fpu_class1(p, r, sub, df, sf, t) \ - p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r) | hppa_op_r3(t) | ((sub) << 15) | ((df) << 13) | ((sf) << 11) | (1 << 9)) - -#define hppa_fpu_class2(p, r1, r2, sub, fmt, n, cond) \ - p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(cond) | ((sub) << 13) | ((fmt) << 11) | (2 << 9) | ((n) << 5)) - -#define hppa_fpu_class3(p, r1, r2, sub, fmt, t) \ - p = hppa_emit (p, hppa_opcode(0x0C) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(t) | ((sub) << 13) | ((fmt) << 11) | (3 << 9)) - -#define HPPA_FP_FMT_SGL 0 -#define HPPA_FP_FMT_DBL 1 -#define HPPA_FP_FMT_QUAD 3 - -#define hppa_fcpy(p, fmt, r, t) hppa_fpu_class0(p, r, 2, fmt, t) -#define hppa_fabs(p, fmt, r, t) hppa_fpu_class0(p, r, 3, fmt, t) -#define hppa_fsqrt(p, fmt, r, t) hppa_fpu_class0(p, r, 4, fmt, t) -#define hppa_frnd(p, fmt, r, t) hppa_fpu_class0(p, r, 5, fmt, t) - -#define hppa_fcnvff(p, sf, df, r, t) hppa_fpu_class1(p, r, 0, df, sf, t) -#define hppa_fcnvxf(p, sf, df, r, t) hppa_fpu_class1(p, r, 1, df, sf, t) -#define hppa_fcnvfx(p, sf, df, r, t) hppa_fpu_class1(p, r, 2, df, sf, t) -#define hppa_fcnvfxt(p, sf, df, r, t) hppa_fpu_class1(p, r, 3, df, sf, t) - -#define hppa_fcmp(p, fmt, cond, r1, r2) hppa_fpu_class2(p, r1, r2, 0, fmt, 0, cond) -#define hppa_ftest(p, cond) hppa_fpu_class2(p, 0, 0, 1, 0, 1, cond) - -#define hppa_fadd(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 0, fmt, t) -#define hppa_fsub(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 1, fmt, t) -#define hppa_fmul(p, fmt, r1, r2, t) hppa_fpu_class3(p, r1, r2, 2, fmt, t) -#define hppa_fdiv(p, fmt, r1, r2, t) 
hppa_fpu_class3(p, r1, r2, 3, fmt, t) - -/* Note: fmpyadd and fmpysub have different fmt encodings as the other - * FP ops - */ -#define hppa_fmpyadd(p, fmt, rm1, rm2, tm, ra, ta) \ - p = hppa_emit (p, hppa_opcode(0x06) | hppa_op_r1(rm1) | hppa_op_r2(rm2) | hppa_op_r3(tm) | ((ta) << 11) | ((ra) << 6) | ((fmt) << 5)) - -#define hppa_fmpyadd_sgl(p, rm1, rm2, tm, ra, ta) \ - hppa_fmpyadd(p, 1, rm1, rm2, tm, ra, ta) - -#define hppa_fmpyadd_dbl(p, rm1, rm2, tm, ra, ta) \ - hppa_fmpyadd(p, 0, rm1, rm2, tm, ra, ta) - -#define hppa_fmpysub(p, fmt, rm1, rm2, tm, ra, ta) \ - p = hppa_emit (p, hppa_opcode(0x06) | hppa_op_r1(rm1) | hppa_op_r2(rm2) | hppa_op_r3(tm) | ((ta) << 11) | ((ra) << 6) | ((fmt) << 5)) - -#define hppa_fmpysub_sgl(p, rm1, rm2, tm, ra, ta) \ - hppa_fmpysub(p, 1, rm1, rm2, tm, ra, ta) - -#define hppa_fmpysub_dbl(p, rm1, rm2, tm, ra, ta) \ - hppa_fmpysub(p, 0, rm1, rm2, tm, ra, ta) - -#define hppa_xmpyu(p, r1, r2, t) \ - p = hppa_emit (p, hppa_opcode(0x0E) | hppa_op_r1(r1) | hppa_op_r2(r2) | hppa_op_r3(t) | (2 << 13) | (3 << 9) | (1 << 8)) - -#define hppa_fldwx(p, x, b, t, half) \ - p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t) | ((half) << 6)) - -#define hppa_flddx(p, x, b, t) \ - p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(t)) - -#define hppa_fstwx(p, r, half, x, b) \ - p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(r) | ((half) << 6) | (1 << 9)) - -#define hppa_fstdx(p, r, x, b) \ - p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | hppa_op_r2(x) | hppa_op_r3(r) | (1 << 9)) - -#define hppa_fldws(p, d, b, t, half) \ - p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(t) | ((half) << 6) | (1 << 12)) - -#define hppa_fldds(p, d, b, t) \ - p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(t) | (1 << 12)) - -#define hppa_fstws(p, r, half, d, b) \ - p = hppa_emit (p, hppa_opcode(0x09) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(r) | ((half) << 6) | (1 << 12) | (1 << 9)) - -#define hppa_fstds(p, r, d, b) \ - p = hppa_emit (p, hppa_opcode(0x0B) | hppa_op_r1(b) | (hppa_op_imm5(((int)(d))) << 16) | hppa_op_r3(r) | (1 << 12) | (1 << 9)) - - -/* Not yet converted old macros - used by interpreter */ -#define hppa_ldd_with_flags(p, disp, base, dest, m, a) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - g_assert(((disp) & 7) == 0); \ - *c++ = (0x50000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_ldd(p, disp, base, dest) \ - hppa_ldd_with_flags(p, disp, base, dest, 0, 0) - -#define hppa_ldd_mb(p, disp, base, dest) \ - hppa_ldd_with_flags(p, disp, base, dest, 1, 1) - -#define hppa_std_with_flags(p, src, disp, base, m, a) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - g_assert(((disp) & 7) == 0); \ - *c++ = (0x70000000 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 
0x4 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_std(p, disp, base, dest) \ - hppa_std_with_flags(p, disp, base, dest, 0, 0) - -#define hppa_std_ma(p, disp, base, dest) \ - hppa_std_with_flags(p, disp, base, dest, 1, 0) - -#define hppa_fldd_with_flags(p, disp, base, dest, m, a) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - *c++ = (0x50000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((dest) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_fldd(p, disp, base, dest) \ - hppa_fldd_with_flags(p, disp, base, dest, 0, 0) - -#define hppa_fstd_with_flags(p, src, disp, base, m, a) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im10a = (disp) >> 3; \ - *c++ = (0x70000002 | (((im10a) & 0x3ff) << 4) | ((base) << 21) | ((src) << 16) | neg | (m ? 0x8 : 0) | (a ? 0x4 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_fstd(p, disp, base, dest) \ - hppa_fstd_with_flags(p, disp, base, dest, 0, 0) - - -#define hppa_fldw_with_flags(p, im11a, base, dest, r) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im11a = (disp) >> 2; \ - *c++ = (0x5c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((dest) << 16) | neg | ((r) ? 0x2 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_fldw(p, disp, base, dest) \ - hppa_fldw_with_flags(p, disp, base, dest, 1) - -#define hppa_fstw_with_flags(p, src, disp, base, r) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - int neg = (disp) < 0; \ - int im11a = (disp) >> 2; \ - *c++ = (0x7c000000 | (((im11a) & 0x7ff) << 3) | ((base) << 21) | ((src) << 16) | neg | ((r) ? 0x2 : 0)); \ - p = (void *)c; \ - } while (0) - -#define hppa_fstw(p, src, disp, base) \ - hppa_fstw_with_flags(p, src, disp, base, 1) - -/* only works on right half SP registers */ -#define hppa_fcnv(p, src, ssng, dest, dsng) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - *c++ = (0x38000200 | ((src) << 21) | ((ssng) ? 0x80 : 0x800) | (dest) | ((dsng) ? 0x40 : 0x2000)); \ - p = (void *)c; \ - } while (0) - -#define hppa_fcnv_sng_dbl(p, src, dest) \ - hppa_fcnv(p, src, 1, dest, 0) - -#define hppa_fcnv_dbl_sng(p, src, dest) \ - hppa_fcnv(p, src, 0, dest, 1) - -#define hppa_extrdu(p, src, pos, len, dest) \ - do { \ - unsigned int *c = (unsigned int *)(p); \ - *c++ = (0xd8000000 | ((src) << 21) | ((dest) << 16) | ((pos) > 32 ? 0x800 : 0) | (((pos) & 31) << 5) | ((len) > 32 ? 0x1000 : 0) | (32 - (len & 31))); \ - p = (void *)c; \ - } while (0) - -#define hppa_bve(p, reg, link) \ - do { \ - *(p) = (0xE8001000 | ((link ? 7 : 6) << 13) | ((reg) << 21)); \ - p++; \ - } while (0) - -#define hppa_blve(p, reg) \ - hppa_bve(p, reg, 1) - -#endif diff --git a/hppa/tramp.c b/hppa/tramp.c deleted file mode 100644 index e012436..0000000 --- a/hppa/tramp.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - Copyright (c) 2003 Bernie Solomon - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - Trampoline generation for HPPA - currently (Oct 9th 2003) only - supports 64 bits - and the HP compiler. -*/ -#ifndef __linux__ - -#include "mono/interpreter/interp.h" -#include "mono/metadata/appdomain.h" -#include "mono/metadata/tabledefs.h" -#include "hppa-codegen.h" - -#if SIZEOF_VOID_P != 8 -#error "HPPA code only currently supports 64bit pointers" -#endif - -// debugging flag which dumps code generated -static int debug_asm = 0; - - -static void flush_cache(void *address, int length) -{ -#ifdef __GNUC__ -#error "currently only supports the HP C compiler" -#else - int cache_line_size = 16; - ulong_t end = (ulong_t)address + length; - register ulong_t sid; - register ulong_t offset = (ulong_t) address; - register ulong_t r0 = 0; - - _asm("LDSID", 0, offset, sid); - _asm("MTSP", sid, 0); - _asm("FDC", r0, 0, offset); - offset = (offset + (cache_line_size - 1)) & ~(cache_line_size - 1); - while (offset < end) { - (void)_asm("FDC", r0, 0, offset); - offset += cache_line_size; - } - _asm("SYNC"); - offset = (ulong_t) address; - _asm("FIC", r0, 0, offset); - offset = (offset + (cache_line_size - 1)) & ~(cache_line_size - 1); - while (offset < end) { - (void)_asm("FIC", r0, 0, offset); - offset += cache_line_size; - } - _asm("SYNC"); - // sync needs at least 7 instructions after it... this is what is used for NOP - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); - _asm("OR", 0, 0, 0); -#endif -} - -static void disassemble (guint32 *code, int n_instrs) -{ - const char *tmp_file = "/tmp/mono_adb.in"; - FILE *fp = fopen(tmp_file, "w"); - int i; - for (i = 0; i < n_instrs; i++) - fprintf(fp, "0x%08x=i\n", code[i]); - fprintf(fp, "$q\n"); - fclose(fp); - system("adb64 param_count, sig->hasthis, sig->explicit_this, string_ctor, sig->ret->type); - } - - // everything takes 8 bytes unless it is a bigger struct - for (param = 0; param < sig->param_count; param++) { - if (sig->params[param]->byref) - frame_size += 8; - else { - if (sig->params[param]->type != MONO_TYPE_VALUETYPE) - frame_size += 8; - else { - if (sig->params [param]->data.klass->enumtype) - frame_size += 8; - else { - frame_size += 15; // large structs are 16 byte aligned - frame_size &= ~15; - frame_size += mono_class_native_size (sig->params [param]->data.klass, NULL); - frame_size += 7; - frame_size &= ~7; - } - } - } - } - - if (sig->hasthis) - frame_size += 8; - // 16 byte alignment - if ((frame_size & 15) != 0) - frame_size += 8; - // minimum is 64 bytes - if (frame_size < 64) - frame_size = 64; - - if (debug_asm) - fprintf(stderr, "outgoing frame size: %d\n", frame_size); - - frame_size += 16; // for the frame marker (called routines stuff return address etc. 
here) - frame_size += 32; // spill area for r4, r5 and r27 (16 byte aligned) - - spill_offset = -frame_size; - parameter_offset = spill_offset + 32; // spill area size is really 24 - spill_offset += 8; - - /* the rest executes twice - once to count instructions so we can - allocate memory in one block and once to fill it in... the count - should be pretty fast anyway... - */ -generate: - pc = 0; - arg_reg = 26; - arg_offset = 0; - args_on_stack = 0; - parameter_slot = parameter_offset; - - ADD_INST(code, pc, hppa_std(code, 2, -16, 30)); // STD %r2,-16(%r30) - ADD_INST(code, pc, hppa_std_ma(code, 3, frame_size, 30)); - ADD_INST(code, pc, hppa_std(code, 4, spill_offset, 30)); - ADD_INST(code, pc, hppa_std(code, 5, spill_offset + 8, 30)); - ADD_INST(code, pc, hppa_copy(code, 29, 3)); // COPY %r29,%r3 - ADD_INST(code, pc, hppa_std(code, 27, spill_offset + 16, 30)); - ADD_INST(code, pc, hppa_nop(code)); // NOP - - ADD_INST(code, pc, hppa_std(code, 26, -64, 29)); // STD %r26,-64(%r29) callme - ADD_INST(code, pc, hppa_std(code, 25, -56, 29)); // STD %r25,-56(%r29) retval - ADD_INST(code, pc, hppa_std(code, 24, -48, 29)); // STD %r24,-48(%r29) this_obj - ADD_INST(code, pc, hppa_std(code, 23, -40, 29)); // STD %r23,-40(%r29) arguments - - if (sig->param_count > 0) - ADD_INST(code, pc, hppa_copy(code, 23, 4)); // r4 is the current pointer to the stackval array of args - - if (sig->hasthis) { - if (sig->call_convention != MONO_CALL_THISCALL) { - ADD_INST(code, pc, hppa_copy(code, 24, arg_reg)); - --arg_reg; - parameter_slot += 8; - } else { - fprintf(stderr, "case I didn't handle\n"); - } - } - - for (param = 0; param < sig->param_count; param++) { - int type = sig->params[param]->type; - if (sig->params[param]->byref) { - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); - ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); - } else { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); - --arg_reg; - } - arg_offset += sizeof(stackval); - parameter_slot += 8; - continue; - } - typeswitch: - switch (type) { - case MONO_TYPE_CHAR: - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldw(code, arg_offset, 4, 5)); - switch (type) { - case MONO_TYPE_I4: - case MONO_TYPE_U4: - ADD_INST(code, pc, hppa_stw(code, 5, parameter_slot + 4, 30)); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - ADD_INST(code, pc, hppa_sth(code, 5, parameter_slot + 6, 30)); - break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - ADD_INST(code, pc, hppa_stb(code, 5, parameter_slot + 7, 30)); - break; - } - } else { - ADD_INST(code, pc, hppa_ldw(code, arg_offset, 4, arg_reg)); - --arg_reg; - } - arg_offset += sizeof(stackval); - parameter_slot += 8; - break; - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_STRING: - case MONO_TYPE_OBJECT: - case MONO_TYPE_CLASS: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_PTR: - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); - ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); - } else { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); - --arg_reg; - } - arg_offset += sizeof(stackval); - parameter_slot += 8; - break; - case MONO_TYPE_R8: - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); - ADD_INST(code, pc, hppa_std(code, 5, 
parameter_slot, 30)); - } else { - ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, FP_ARG_REG(arg_reg))); - --arg_reg; - } - arg_offset += sizeof(stackval); - parameter_slot += 8; - break; - case MONO_TYPE_R4: - if (args_on_stack) { - ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, 22)); - ADD_INST(code, pc, hppa_fcnv_dbl_sng(code, 22, 22)); - ADD_INST(code, pc, hppa_fstw(code, 22, parameter_slot + 4, 30)); - } else { - ADD_INST(code, pc, hppa_fldd(code, arg_offset, 4, FP_ARG_REG(arg_reg))); - ADD_INST(code, pc, hppa_fcnv_dbl_sng(code, FP_ARG_REG(arg_reg), FP_ARG_REG(arg_reg))); - --arg_reg; - } - arg_offset += sizeof(stackval); - parameter_slot += 8; - break; - case MONO_TYPE_VALUETYPE: - if (sig->params [param]->data.klass->enumtype) { - type = sig->params [param]->data.klass->enum_basetype->type; - goto typeswitch; - } else { - int size = mono_class_native_size (sig->params [param]->data.klass, NULL); - // assumes struct is 8 byte aligned whatever its size... (as interp.c guarantees at present) - // copies multiple of 8 bytes which may include some trailing garbage but should be safe - if (size <= 8) { - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); - ADD_INST(code, pc, hppa_ldd(code, 0, 5, 5)); - ADD_INST(code, pc, hppa_std(code, 5, parameter_slot, 30)); - } else { - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, arg_reg)); - ADD_INST(code, pc, hppa_ldd(code, 0, arg_reg, arg_reg)); - --arg_reg; - } - parameter_slot += 8; - } else { - int soffset = 0; - if ((parameter_slot & 15) != 0) { - --arg_reg; - if (arg_reg < 19) { - args_on_stack = 1; - } - parameter_slot += 8; - } - ADD_INST(code, pc, hppa_ldd(code, arg_offset, 4, 5)); - // might generate a lot of code for very large structs... should - // use a loop or routine call them - while (size > 0) { - if (args_on_stack) { - ADD_INST(code, pc, hppa_ldd(code, soffset, 5, 31)); - ADD_INST(code, pc, hppa_std(code, 31, parameter_slot, 30)); - } else { - ADD_INST(code, pc, hppa_ldd(code, soffset, 5, arg_reg)); - --arg_reg; - if (arg_reg < 19) - args_on_stack = 1; - } - parameter_slot += 8; - soffset += 8; - size -= 8; - } - } - arg_offset += sizeof(stackval); - break; - } - break; - default: - g_error ("mono_create_trampoline: unhandled arg type %d", type); - return NULL; - } - - if (arg_reg < 19) { - args_on_stack = 1; - } - } - - // for large return structs just pass on the buffer given to us. 
-	if (sig->ret->type == MONO_TYPE_VALUETYPE && sig->ret->data.klass->enumtype == 0) {
-		int size = mono_class_native_size (sig->ret->data.klass, NULL);
-		if (size > 16) {
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 28));
-			ADD_INST(code, pc, hppa_ldd(code, 0, 28, 28));
-		}
-	}
-
-	ADD_INST(code, pc, hppa_nop(code)); // NOP
-	ADD_INST(code, pc, hppa_ldd(code, -64, 29, 5));
-	ADD_INST(code, pc, hppa_ldd(code, 24, 5, 27));
-	ADD_INST(code, pc, hppa_ldd(code, 16, 5, 5));
-	ADD_INST(code, pc, hppa_blve(code, 5));
-	ADD_INST(code, pc, hppa_ldo(code, parameter_offset + 64, 30, 29));
-	ADD_INST(code, pc, hppa_ldd(code, spill_offset + 16, 30, 27));
-	ADD_INST(code, pc, hppa_nop(code)); // NOP
-
-	if (string_ctor) {
-		ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-		ADD_INST(code, pc, hppa_std(code, 28, 0, 19)); // STD %r28,0(%r19)
-	}
-	else if (sig->ret->type != MONO_TYPE_VOID) {
-		int type = sig->ret->type;
-
-	rettypeswitch:
-		switch (type) {
-		case MONO_TYPE_BOOLEAN:
-		case MONO_TYPE_I1:
-		case MONO_TYPE_U1:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_stb(code, 28, 0, 19)); // STB %r28,0(%r19)
-			break;
-		case MONO_TYPE_I4:
-		case MONO_TYPE_U4:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_stw(code, 28, 0, 19)); // STW %r28,0(%r19)
-			break;
-		case MONO_TYPE_CHAR:
-		case MONO_TYPE_I2:
-		case MONO_TYPE_U2:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_sth(code, 28, 0, 19)); // STH %r28,0(%r19)
-			break;
-		case MONO_TYPE_I8:
-		case MONO_TYPE_U8:
-		case MONO_TYPE_I:
-		case MONO_TYPE_U:
-		case MONO_TYPE_STRING:
-		case MONO_TYPE_OBJECT:
-		case MONO_TYPE_CLASS:
-		case MONO_TYPE_SZARRAY:
-		case MONO_TYPE_PTR:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_std(code, 28, 0, 19)); // STD %r28,0(%r19)
-			break;
-		case MONO_TYPE_R8:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_fstd(code, 4, 0, 19)); // FSTD %fr4,0(%r19)
-			break;
-		case MONO_TYPE_R4:
-			ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19)); // LDD -56(%r3),%r19
-			ADD_INST(code, pc, hppa_fstw(code, 4, 0, 19)); // FSTW %fr4,0(%r19)
-			break;
-		case MONO_TYPE_VALUETYPE:
-			if (sig->ret->data.klass->enumtype) {
-				type = sig->ret->data.klass->enum_basetype->type;
-				goto rettypeswitch;
-			} else {
-				int size = mono_class_native_size (sig->ret->data.klass, NULL);
-				if (size <= 16) {
-					int reg = 28;
-					int off = 0;
-					ADD_INST(code, pc, hppa_ldd(code, -56, 3, 19));
-					ADD_INST(code, pc, hppa_ldd(code, 0, 19, 19));
-					if (size > 8) {
-						ADD_INST(code, pc, hppa_std(code, 28, 0, 19));
-						size -= 8;
-						reg = 29;
-						off += 8;
-					}
-					// get rest of value right aligned in the register
-					ADD_INST(code, pc, hppa_extrdu(code, reg, 8 * size - 1, 8 * size, reg));
-					if ((size & 1) != 0) {
-						ADD_INST(code, pc, hppa_stb(code, reg, off + size - 1, 19));
-						ADD_INST(code, pc, hppa_extrdu(code, reg, 55, 56, reg));
-						size -= 1;
-					}
-					if ((size & 2) != 0) {
-						ADD_INST(code, pc, hppa_sth(code, reg, off + size - 2, 19));
-						ADD_INST(code, pc, hppa_extrdu(code, reg, 47, 48, reg));
-						size -= 2;
-					}
-					if ((size & 4) != 0)
-						ADD_INST(code, pc, hppa_stw(code, reg, off + size - 4, 19));
-				}
-				break;
-			}
-		default:
-			g_error ("mono_create_trampoline: unhandled ret type %d", type);
-			return NULL;
-		}
-	}
-
-	ADD_INST(code, pc, hppa_ldd(code, -frame_size-16, 30, 2));
-	ADD_INST(code, pc, hppa_ldd(code, spill_offset, 30, 4));
-
ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 5)); - ADD_INST(code, pc, hppa_bve(code, 2, 0)); - ADD_INST(code, pc, hppa_ldd_mb(code, -frame_size, 30, 3)); - - if (code == NULL) { - descriptor = (void **)g_malloc(4 * sizeof(void *) + pc * sizeof(unsigned int)); - code = (unsigned int *)((char *)descriptor + 4 * sizeof(void *)); - code_start = code; - save_pc = pc; - goto generate; - } else - g_assert(pc == save_pc); - - if (debug_asm) { - fprintf(stderr, "generated: %d bytes\n", pc * 4); - disassemble(code_start, pc); - } - - // must do this so we can actually execute the code we just put in memory - flush_cache(code_start, 4 * pc); - - descriptor[0] = 0; - descriptor[1] = 0; - descriptor[2] = code_start; - descriptor[3] = 0; - - return (MonoPIFunc)descriptor; -} - -void * -mono_arch_create_method_pointer (MonoMethod *method) -{ - MonoMethodSignature *sig = method->signature; - MonoJitInfo *ji; - int i; - int pc; - int param; - void **descriptor = NULL; - void **data = NULL; - unsigned int *code = NULL; - unsigned int *code_start = NULL; - int arg_reg = 26; - int arg_offset = 0; - int frame_size; - int invoke_rec_offset; - int stack_vals_offset; - int stack_val_pos; - int arg_val_pos; - int spill_offset; - int *vtoffsets; - int t; - - if (debug_asm) { - fprintf(stderr, "mono_create_method_pointer %s: flags %d\n", method->name, method->flags); - fprintf(stderr, "method: # params %d has this %d exp this %d\n", sig->param_count, sig->hasthis, sig->explicit_this); - fprintf(stderr, "ret %d\n", sig->ret->type); - for (i = 0; i < sig->param_count; i++) - fprintf(stderr, "%d: %d\n", i, sig->params[i]->type); - } - - // the extra stackval is for the return val if necessary - // the 64 is for outgoing parameters and the 16 is the frame marker. - // the other 16 is space for struct return vals < 16 bytes - frame_size = sizeof(MonoInvocation) + (sig->param_count + 1) * sizeof(stackval) + 16 + 64 + 16; - frame_size += 15; - frame_size &= ~15; - invoke_rec_offset = -frame_size; - vtoffsets = (int *)alloca(sig->param_count * sizeof(int)); - - t = invoke_rec_offset; - - for (i = 0; i < sig->param_count; ++i) - if (sig->params[i]->type == MONO_TYPE_VALUETYPE && - !sig->params[i]->data.klass->enumtype && !sig->params[i]->byref) { - int size = mono_class_native_size (sig->params[i]->data.klass, NULL); - size += 7; - size &= ~7; - t -= size; - frame_size += size; - vtoffsets[i] = t; - } - - stack_vals_offset = invoke_rec_offset + sizeof(MonoInvocation); - stack_vals_offset += 7; - stack_vals_offset &= ~7; - frame_size += 32; - frame_size += 15; - frame_size &= ~15; - spill_offset = -frame_size + 8; - -generate: - stack_val_pos = stack_vals_offset; - arg_val_pos = -64; - pc = 0; - - ADD_INST(code, pc, hppa_std(code, 2, -16, 30)); - ADD_INST(code, pc, hppa_std_ma(code, 3, frame_size, 30)); - ADD_INST(code, pc, hppa_std(code, 4, spill_offset, 30)); - ADD_INST(code, pc, hppa_copy(code, 29, 3)); - ADD_INST(code, pc, hppa_std(code, 27, spill_offset + 8, 30)); - ADD_INST(code, pc, hppa_std(code, 28, spill_offset + 16, 30)); - ADD_INST(code, pc, hppa_nop(code)); - - ADD_INST(code, pc, hppa_std(code, 26, -64, 29)); // STD %r26,-64(%r29) - ADD_INST(code, pc, hppa_std(code, 25, -56, 29)); // STD %r25,-56(%r29) - ADD_INST(code, pc, hppa_std(code, 24, -48, 29)); // STD %r24,-48(%r29) - ADD_INST(code, pc, hppa_std(code, 23, -40, 29)); // STD %r23,-40(%r29) - ADD_INST(code, pc, hppa_std(code, 22, -32, 29)); // STD %r22,-32(%r29) - ADD_INST(code, pc, hppa_std(code, 21, -24, 29)); // STD %r21,-24(%r29) - 
ADD_INST(code, pc, hppa_std(code, 20, -16, 29)); // STD %r20,-16(%r29) - ADD_INST(code, pc, hppa_std(code, 19, -8, 29)); // STD %r19,-8(%r29) - - ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, parent), 30)); - ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex), 30)); - ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ex_handler), 30)); - ADD_INST(code, pc, hppa_std(code, 0, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, ip), 30)); - - if (data != NULL) - data[0] = method; - ADD_INST(code, pc, hppa_ldd(code, 0, 27, 19)); - ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, method), 30)); - - if (sig->hasthis) { - if (sig->call_convention != MONO_CALL_THISCALL) { - ADD_INST(code, pc, hppa_std(code, arg_reg, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, obj), 30)); - arg_val_pos += 8; - } else { - fprintf(stderr, "case I didn't handle 2\n"); - } - } - - if (data != NULL) - data[2] = (void *)stackval_from_data; - - for (i = 0; i < sig->param_count; ++i) { - if (data != NULL) - data[4 + i] = sig->params[i]; - ADD_INST(code, pc, hppa_ldd(code, (4 + i) * 8, 27, 26)); // LDD x(%r27),%r26 == type - ADD_INST(code, pc, hppa_ldo(code, stack_val_pos, 30, 25)); // LDD x(%r30),%r25 == &stackval - if (sig->params[i]->byref) { - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); - } else { - int type = sig->params[i]->type; - typeswitch: - switch (type) { - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_STRING: - case MONO_TYPE_OBJECT: - case MONO_TYPE_CLASS: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_PTR: - case MONO_TYPE_R8: - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 4, 3, 24)); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 6, 3, 24)); - break; - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_BOOLEAN: - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos + 7, 3, 24)); - break; - case MONO_TYPE_VALUETYPE: - if (sig->params [i]->data.klass->enumtype) { - type = sig->params [i]->data.klass->enum_basetype->type; - goto typeswitch; - } else { - int size = mono_class_native_size (sig->params[i]->data.klass, NULL); - if (size <= 8) - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); - else { - arg_val_pos += 15; - arg_val_pos &= ~15; - ADD_INST(code, pc, hppa_ldo(code, arg_val_pos, 3, 24)); - } - - arg_val_pos += size; - arg_val_pos += 7; - arg_val_pos &= ~7; - arg_val_pos -=8 ; // as it is incremented later - - ADD_INST(code, pc, hppa_ldo(code, vtoffsets[i], 30, 19)); - ADD_INST(code, pc, hppa_std(code, 19, 0, 25)); - } - break; - default: - fprintf(stderr, "can not cope in create method pointer %d\n", sig->params[i]->type); - break; - } - } - - ADD_INST(code, pc, hppa_ldo(code, sig->pinvoke, 0, 23)); // LDI sig->pinvoke,%r23 - ADD_INST(code, pc, hppa_ldd(code, 16, 27, 19)); // LDD x(%r27),%r19 == stackval_from_data - ADD_INST(code, pc, hppa_ldd(code, 16, 19, 20)); // LDD 16(%r19),%r20 - ADD_INST(code, pc, hppa_ldd(code, 24, 19, 27)); // LDD 24(%r19),%r27 - ADD_INST(code, pc, hppa_blve(code, 20)); // BVE,L (%r20),%r2 - ADD_INST(code, pc, hppa_ldo(code, -16, 30, 29)); // LDO -16(%r30),%r29 - ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 27)); - - stack_val_pos += sizeof 
(stackval);
-		arg_val_pos += 8;
-		g_assert(stack_val_pos < -96);
-	}
-
-	ADD_INST(code, pc, hppa_ldo(code, stack_vals_offset, 30, 19));
-	ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, stack_args), 30));
-	ADD_INST(code, pc, hppa_ldo(code, stack_val_pos, 30, 19));
-	ADD_INST(code, pc, hppa_std(code, 19, invoke_rec_offset + G_STRUCT_OFFSET (MonoInvocation, retval), 30));
-
-	if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) {
-		int size = mono_class_native_size (sig->ret->data.klass, NULL);
-		// for large return structs pass on the pointer given us by our caller.
-		if (size > 16)
-			ADD_INST(code, pc, hppa_ldd(code, spill_offset + 16, 30, 28));
-		else // use space left on stack for the return value
-			ADD_INST(code, pc, hppa_ldo(code, stack_val_pos + sizeof(stackval), 30, 28));
-		ADD_INST(code, pc, hppa_std(code, 28, stack_val_pos, 30));
-	}
-
-	ADD_INST(code, pc, hppa_ldo(code, invoke_rec_offset, 30, 26)); // address of invocation
-
-	if (data != NULL)
-		data[1] = (void *)ves_exec_method;
-	ADD_INST(code, pc, hppa_ldd(code, 8, 27, 19)); // LDD 8(%r27),%r19
-	ADD_INST(code, pc, hppa_ldd(code, 16, 19, 20)); // LDD 16(%r19),%r20
-	ADD_INST(code, pc, hppa_ldd(code, 24, 19, 27)); // LDD 24(%r19),%r27
-	ADD_INST(code, pc, hppa_blve(code, 20)); // BVE,L (%r20),%r2
-	ADD_INST(code, pc, hppa_ldo(code, -16, 30, 29)); // LDO -16(%r30),%r29
-	ADD_INST(code, pc, hppa_ldd(code, spill_offset + 8, 30, 27));
-	if (sig->ret->byref) {
-		fprintf(stderr, "can't cope with ret byref\n");
-	} else {
-		int simpletype = sig->ret->type;
-	enum_retvalue:
-		switch (simpletype) {
-		case MONO_TYPE_VOID:
-			break;
-		case MONO_TYPE_BOOLEAN:
-		case MONO_TYPE_I1:
-		case MONO_TYPE_U1:
-		case MONO_TYPE_CHAR:
-		case MONO_TYPE_I2:
-		case MONO_TYPE_U2:
-		case MONO_TYPE_I4:
-		case MONO_TYPE_U4:
-			ADD_INST(code, pc, hppa_ldw(code, stack_val_pos, 30, 28)); // LDW x(%r30),%r28
-			break;
-		case MONO_TYPE_I8:
-		case MONO_TYPE_U8:
-		case MONO_TYPE_I:
-		case MONO_TYPE_U:
-		case MONO_TYPE_STRING:
-		case MONO_TYPE_OBJECT:
-		case MONO_TYPE_CLASS:
-		case MONO_TYPE_SZARRAY:
-		case MONO_TYPE_PTR:
-			ADD_INST(code, pc, hppa_ldd(code, stack_val_pos, 30, 28)); // LDD x(%r30),%r28
-			break;
-		case MONO_TYPE_R8:
-			ADD_INST(code, pc, hppa_fldd(code, stack_val_pos, 30, 4)); // FLDD x(%r30),%fr4
-			break;
-		case MONO_TYPE_VALUETYPE:
-			if (sig->ret->data.klass->enumtype) {
-				simpletype = sig->ret->data.klass->enum_basetype->type;
-				goto enum_retvalue;
-			} else {
-				int size = mono_class_native_size (sig->ret->data.klass, NULL);
-				if (size <= 16) {
-					ADD_INST(code, pc, hppa_ldd(code, stack_val_pos, 30, 28));
-					if (size > 8)
-						ADD_INST(code, pc, hppa_ldd(code, 8, 28, 29));
-					ADD_INST(code, pc, hppa_ldd(code, 0, 28, 28));
-				}
-			}
-			break;
-		default:
-			fprintf(stderr, "can't cope with ret type %d\n", simpletype);
-			return NULL;
-		}
-	}
-
-	ADD_INST(code, pc, hppa_ldd(code, -frame_size-16, 30, 2));
-	ADD_INST(code, pc, hppa_ldd(code, spill_offset, 30, 4));
-	ADD_INST(code, pc, hppa_bve(code, 2, 0));
-	ADD_INST(code, pc, hppa_ldd_mb(code, -frame_size, 30, 3));
-	if (code == NULL) {
-		descriptor = (void **)malloc((8 + sig->param_count) * sizeof(void *) + sizeof(unsigned int) * pc);
-		data = descriptor + 4;
-		code = (unsigned int *)(data + 4 + sig->param_count);
-		code_start = code;
-		goto generate;
-	}
-
-	if (debug_asm) {
-		fprintf(stderr, "generated: %d bytes\n", pc * 4);
-		disassemble(code_start, pc);
-	}
-
-	flush_cache(code_start, 4 * pc);
-
-	descriptor[0] = 0;
-	descriptor[1] = 0;
-
descriptor[2] = code_start; - descriptor[3] = data; - - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = 4; // does this matter? - ji->code_start = descriptor; - - mono_jit_info_table_add (mono_get_root_domain (), ji); - - return ji->code_start; -} -#endif -- cgit v1.1 From a2b380c30f8e12e508d9b761b9b049d17dff3617 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 1 Mar 2013 20:27:07 +0100 Subject: Remove the unmaintained and incomplete alpha backend. --- Makefile.am | 2 +- alpha/.gitignore | 4 - alpha/Makefile.am | 8 - alpha/alpha-codegen.h | 576 -------------------------------------------------- alpha/test.c | 156 -------------- alpha/tramp.c | 380 --------------------------------- 6 files changed, 1 insertion(+), 1125 deletions(-) delete mode 100644 alpha/.gitignore delete mode 100644 alpha/Makefile.am delete mode 100644 alpha/alpha-codegen.h delete mode 100644 alpha/test.c delete mode 100644 alpha/tramp.c diff --git a/Makefile.am b/Makefile.am index 2cfec09..0bedf77 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -DIST_SUBDIRS = x86 ppc sparc arm s390 s390x alpha amd64 ia64 mips +DIST_SUBDIRS = x86 ppc sparc arm s390 s390x amd64 ia64 mips AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/alpha/.gitignore b/alpha/.gitignore deleted file mode 100644 index 6abcd22..0000000 --- a/alpha/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/Makefile.in -/Makefile -/.deps -/.cvsignore diff --git a/alpha/Makefile.am b/alpha/Makefile.am deleted file mode 100644 index 86cbcb6..0000000 --- a/alpha/Makefile.am +++ /dev/null @@ -1,8 +0,0 @@ - -AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-alpha.la - -libmonoarch_alpha_la_SOURCES = tramp.c alpha-codegen.h - -noinst_PROGRAMS = test diff --git a/alpha/alpha-codegen.h b/alpha/alpha-codegen.h deleted file mode 100644 index 46f95e1..0000000 --- a/alpha/alpha-codegen.h +++ /dev/null @@ -1,576 +0,0 @@ -#ifndef __ALPHA_CODEGEN_H__ -#define __ALPHA_CODEGEN_H__ - -/* - http://ftp.digital.com/pub/Digital/info/semiconductor/literature/alphaahb.pdf -*/ - -typedef enum { - alpha_r0 = 0, - alpha_r1 = 1, - alpha_r2 = 2, - alpha_r3 = 3, - alpha_r4 = 4, - alpha_r5 = 5, - alpha_r6 = 6, - alpha_r7 = 7, - alpha_r8 = 8, - alpha_r9 = 9, - alpha_r10 = 10, - alpha_r11 = 11, - alpha_r12 = 12, - alpha_r13 = 13, - alpha_r14 = 14, - alpha_r15 = 15, - alpha_r16 = 16, - alpha_r17 = 17, - alpha_r18 = 18, - alpha_r19 = 19, - alpha_r20 = 20, - alpha_r21 = 21, - alpha_r22 = 22, - alpha_r23 = 23, - alpha_r24 = 24, - alpha_r25 = 25, - alpha_r26 = 26, - alpha_r27 = 27, - alpha_r28 = 28, - alpha_r29 = 29, - alpha_r30 = 30, - alpha_r31 = 31, alpha_zero = 31, - /* aliases */ - alpha_v0 = 0, /* return value */ - - alpha_t0 = 1, /* temporaries */ - alpha_t1 = 2, - alpha_t2 = 3, - alpha_t3 = 4, - alpha_t4 = 5, - alpha_t5 = 6, - alpha_t6 = 7, - alpha_t7 = 8, - - alpha_s0 = 9, /* saved registers */ - alpha_s1 = 10, - alpha_s2 = 11, - alpha_s3 = 12, - alpha_s4 = 13, - alpha_s5 = 14, - alpha_s6 = 15, - - alpha_fp = 15, /* frame pointer */ - - alpha_a0 = 16, /* argument registers */ - alpha_a1 = 17, - alpha_a2 = 18, - alpha_a3 = 19, - alpha_a4 = 20, - alpha_a5 = 21, - - alpha_t8 = 22, /* temporaries */ - alpha_t9 = 23, - alpha_t10 = 24, - alpha_t11 = 25, - - alpha_ra = 26, /* Return Address */ - - alpha_pv = 27, /* pv current procedure */ - alpha_t12 = 27, /* temp 12 */ - - alpha_altreg = 28, - alpha_at = 28, - - alpha_gp = 29, /* Global Pointer */ - alpha_sp = 30, /* Stack Pointer */ -} AlphaRegister; - -typedef enum { - /* 
floating point registers */
-	alpha_f0 = 0,
-	alpha_f1 = 1,
-	alpha_f2 = 2,
-	alpha_f3 = 3,
-	alpha_f4 = 4,
-	alpha_f5 = 5,
-	alpha_f6 = 6,
-	alpha_f7 = 7,
-	alpha_f8 = 8,
-	alpha_f9 = 9,
-	alpha_f10 = 10,
-	alpha_f11 = 11,
-	alpha_f12 = 12,
-	alpha_f13 = 13,
-	alpha_f14 = 14,
-	alpha_f15 = 15,
-	alpha_f16 = 16,
-	alpha_f17 = 17,
-	alpha_f18 = 18,
-	alpha_f19 = 19,
-	alpha_f20 = 20,
-	alpha_f21 = 21,
-	alpha_f22 = 22,
-	alpha_f23 = 23,
-	alpha_f24 = 24,
-	alpha_f25 = 25,
-	alpha_f26 = 26,
-	alpha_f27 = 27,
-	alpha_f28 = 28,
-	alpha_f29 = 29,
-	alpha_f30 = 30,
-	alpha_f31 = 31, alpha_fzero = 31,
-	/* aliases */
-	alpha_fv0 = 0, /* return value */
-	alpha_fv1 = 1,
-
-	alpha_fs0 = 2, /* saved registers */
-	alpha_fs1 = 3,
-	alpha_fs2 = 4,
-	alpha_fs3 = 5,
-	alpha_fs4 = 6,
-	alpha_fs5 = 7,
-	alpha_fs6 = 8,
-	alpha_fs7 = 9,
-
-	alpha_ft0 = 10, /* temporary */
-	alpha_ft1 = 11,
-	alpha_ft2 = 12,
-	alpha_ft3 = 13,
-	alpha_ft4 = 14,
-	alpha_ft5 = 15,
-
-	alpha_fa0 = 16, /* args */
-	alpha_fa1 = 17,
-	alpha_fa2 = 18,
-	alpha_fa3 = 19,
-	alpha_fa4 = 20,
-	alpha_fa5 = 21,
-
-	alpha_ft6 = 22,
-	alpha_ft7 = 23,
-	alpha_ft8 = 24,
-	alpha_ft9 = 25,
-	alpha_ft10 = 26,
-	alpha_ft11 = 27,
-	alpha_ft12 = 28,
-	alpha_ft13 = 29,
-	alpha_ft14 = 30
-} AlphaFPRegister;
-
-/***************************************/
-
-#define __alpha_int_32 unsigned int
-
-/***************************************/
-#define AXP_OFF26_MASK 0x03ffffff
-#define AXP_OFF21_MASK 0x01fffff
-#define AXP_OFF16_MASK 0x0ffff
-#define AXP_OFF14_MASK 0x03fff
-#define AXP_OFF13_MASK 0x01fff
-#define AXP_OFF11_MASK 0x07ff
-#define AXP_OFF8_MASK 0x0ff
-#define AXP_OFF7_MASK 0x07f
-#define AXP_OFF6_MASK 0x03f
-#define AXP_OFF5_MASK 0x01f
-#define AXP_OFF4_MASK 0x0f
-#define AXP_OFF2_MASK 0x03
-#define AXP_OFF1_MASK 0x01
-
-
-#define AXP_REG_MASK AXP_OFF5_MASK
-#define AXP_REGSIZE 5
-
-#define AXP_OP_SHIFT 26
-#define AXP_REG1_SHIFT 21
-#define AXP_REG2_SHIFT 16
-#define AXP_MEM_BR_SHIFT 14
-#define AXP_LIT_SHIFT 13
-
-/* encode the opcode */
-#define alpha_opcode( op ) \
-	((op&AXP_OFF6_MASK) << AXP_OP_SHIFT)
-
-/* encode registers */
-#define alpha_reg_a( reg ) \
-	((reg & AXP_REG_MASK) << AXP_REG1_SHIFT)
-
-#define alpha_reg_b( reg ) \
-	((reg & AXP_REG_MASK) << AXP_REG2_SHIFT)
-
-#define alpha_reg_c( reg ) \
-	(reg & AXP_REG_MASK)
-
-
-/* encode function codes */
-#define alpha_fp_func( func ) \
-	((func & AXP_OFF11_MASK) << AXP_REGSIZE)
-
-#define alpha_op_func( func ) \
-	((func & AXP_OFF7_MASK) << AXP_REGSIZE)
-
-#define alpha_op_literal( lit ) \
-	((lit & AXP_OFF8_MASK) << AXP_LIT_SHIFT)
-
-#define alpha_mem_br_func( func, hint ) \
-	(((func & AXP_OFF2_MASK ) << AXP_MEM_BR_SHIFT ) | (hint&AXP_OFF14_MASK))
-
-#define alpha_mem_fc_func( func ) \
-	(func & AXP_OFF16_MASK)
-
-
-#define alpha_encode_hw4_mem( op, func ) \
-	(alpha_opcode( op ) | (( func & 0x0f ) << 12))
-
-#define alpha_encode_hw5_mem( op, func ) \
-	(alpha_opcode( op ) | (( func & 0x3f ) << 10))
-
-#define alpha_encode_hw6mem( op, func ) \
-	(alpha_opcode( op ) | (( func & 0x0f ) << 12))
-
-#define alpha_encode_hw6mem_br( op, func ) \
-	(alpha_opcode( op ) | (( func & 0x07 ) << 13))
-
-
-/*****************************************/
-
-
-#define alpha_encode_palcall( ins, op, func ) \
-	*((__alpha_int_32*)(ins)) = ( 0 |\
-		alpha_opcode( op ) | ( func & AXP_OFF26_MASK )),\
-	((__alpha_int_32*)(ins))++
-
-#define alpha_encode_mem( ins, op, Rdest, Rsrc, offset ) \
-	*((__alpha_int_32*)(ins)) = ( 0 |\
-		alpha_opcode( op ) | alpha_reg_a( Rdest ) | \
-		alpha_reg_b( Rsrc ) | (offset &
AXP_OFF16_MASK )),\ - ((__alpha_int_32*)(ins))++ - -#define alpha_encode_mem_fc( ins, op, func, Rdest, Rsrc, offset ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ - alpha_reg_b( Rsrc ) | alpha_mem_fc_func( func )),\ - *((__alpha_int_32*)(ins))++ - -#define alpha_encode_mem_br( ins, op, func, Rdest, Rsrc, hint ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rdest ) | \ - alpha_reg_b( Rsrc ) | alpha_mem_br_func( func, hint ) ),\ - ((__alpha_int_32*)(ins))++ - -#define alpha_encode_branch( ins, op, Reg, offset ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Reg ) | \ - (offset & AXP_OFF21_MASK )),\ - ((__alpha_int_32*)(ins))++ - -#define alpha_encode_op( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ - alpha_reg_b( Rsrc2 ) | alpha_op_func( func ) | \ - alpha_reg_c( Rdest )),\ - ((__alpha_int_32*)(ins))++ - - -#define alpha_encode_opl( ins, op, func, Rsrc, lit, Rdest ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rsrc ) | \ - alpha_op_literal(lit) | ( 1 << 12 ) | \ - alpha_op_func( func ) | alpha_reg_c( Rdest ) ),\ - ((__alpha_int_32*)(ins))++ - - -#define alpha_encode_fpop( ins, op, func, Rsrc1, Rsrc2, Rdest ) \ - *((__alpha_int_32*)(ins)) = ( 0 |\ - alpha_opcode( op ) | alpha_reg_a( Rsrc1 ) | \ - alpha_reg_b( Rsrc2 ) | alpha_fp_func( func ) | \ - alpha_reg_c( Rdest )),\ - ((__alpha_int_32*)(ins))++ - - -/***************************************/ - -/* pal calls */ -/* #define alpha_halt( ins ) alpha_encode_palcall( ins, 0, 0 ) */ - -#define alpha_call_pal( ins, func ) alpha_encode_palcall( ins, 0, func ) - -/*memory*/ -#define alpha_lda( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x08, Rdest, Rsrc, offset ) -#define alpha_ldah( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x09, Rdest, Rsrc, offset ) -#define alpha_ldbu( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0a, Rdest, Rsrc, offset ) -#define alpha_ldq_u( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0b, Rdest, Rsrc, offset ) -#define alpha_ldwu( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0c, Rdest, Rsrc, offset ) -#define alpha_stw( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0d, Rdest, Rsrc, offset ) -#define alpha_stb( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0e, Rdest, Rsrc, offset ) -#define alpha_stq_u( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x0f, Rdest, Rsrc, offset ) - -#ifdef __VAX__ -#define alpha_ldf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x20, Rdest, Rsrc, offset ) -#define alpha_ldg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x21, Rdest, Rsrc, offset ) -#define alpha_stf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x24, Rdest, Rsrc, offset ) -#define alpha_stg( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x25, Rdest, Rsrc, offset ) -#endif - -#define alpha_lds( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x22, Rdest, Rsrc, offset ) -#define alpha_ldt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) -#define alpha_ldqf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x23, Rdest, Rsrc, offset ) - -#define alpha_sts( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x26, Rdest, Rsrc, offset ) -#define alpha_stt( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x27, Rdest, Rsrc, offset ) -#define alpha_stqf( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x27, Rdest, Rsrc, offset ) - - 
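As a sanity check on the memory-format encoding above (opcode in bits 31..26,
Ra in bits 25..21, Rb in bits 20..16, 16-bit displacement in bits 15..0), the
standalone C fragment below, which is not part of the tree, hand-expands
alpha_lda( p, alpha_sp, alpha_sp, -16 ) and reproduces the first word of the
byte-encoded test function in alpha/test.c further down:

	#include <assert.h>

	int main (void)
	{
		/* lda sp,-16(sp): opcode 0x08, Ra = Rb = sp (30), disp = -16 */
		unsigned int w = (0x08 << 26) | (30 << 21) | (30 << 16) | (-16 & 0xffff);
		assert (w == 0x23defff0); /* matches _func_code[0] in test.c */
		return 0;
	}
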
-#define alpha_ldl( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x28, Rdest, Rsrc, offset ) -#define alpha_ldq( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x29, Rdest, Rsrc, offset ) -#define alpha_ldl_l( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2A, Rdest, Rsrc, offset ) -#define alpha_ldq_l( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2B, Rdest, Rsrc, offset ) -#define alpha_stl( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2C, Rdest, Rsrc, offset ) -#define alpha_stq( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2D, Rdest, Rsrc, offset ) -#define alpha_stl_c( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2E, Rdest, Rsrc, offset ) -#define alpha_stq_c( ins, Rdest, Rsrc, offset ) alpha_encode_mem( ins, 0x2F, Rdest, Rsrc, offset ) - - -/* branch*/ -#define alpha_jmp( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x0, Rdest, Rsrc, hint ) -#define alpha_jsr( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x1, Rdest, Rsrc, hint ) -#define alpha_ret( ins, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x2, alpha_zero, Rsrc, hint ) -#define alpha_jsrco( ins, Rdest, Rsrc, hint ) alpha_encode_mem_br( ins, 0x1A, 0x3, Rdest, Rsrc, hint ) - -#define alpha_br( ins, Reg, offset ) alpha_encode_branch( ins, 0x30, Reg, offset ) -#define alpha_fbeq( ins, Reg, offset ) alpha_encode_branch( ins, 0x31, Reg, offset ) -#define alpha_fblt( ins, Reg, offset ) alpha_encode_branch( ins, 0x32, Reg, offset ) -#define alpha_fble( ins, Reg, offset ) alpha_encode_branch( ins, 0x33, Reg, offset ) -#define alpha_bsr( ins, Reg, offset ) alpha_encode_branch( ins, 0x34, Reg, offset ) -#define alpha_fbne( ins, Reg, offset ) alpha_encode_branch( ins, 0x35, Reg, offset ) -#define alpha_fbge( ins, Reg, offset ) alpha_encode_branch( ins, 0x36, Reg, offset ) -#define alpha_fbgt( ins, Reg, offset ) alpha_encode_branch( ins, 0x37, Reg, offset ) -#define alpha_blbc( ins, Reg, offset ) alpha_encode_branch( ins, 0x38, Reg, offset ) -#define alpha_beq( ins, Reg, offset ) alpha_encode_branch( ins, 0x39, Reg, offset ) -#define alpha_blt( ins, Reg, offset ) alpha_encode_branch( ins, 0x3A, Reg, offset ) -#define alpha_ble( ins, Reg, offset ) alpha_encode_branch( ins, 0x3B, Reg, offset ) -#define alpha_blbs( ins, Reg, offset ) alpha_encode_branch( ins, 0x3C, Reg, offset ) -#define alpha_bne( ins, Reg, offset ) alpha_encode_branch( ins, 0x3D, Reg, offset ) -#define alpha_bge( ins, Reg, offset ) alpha_encode_branch( ins, 0x3E, Reg, offset ) -#define alpha_bgt( ins, Reg, offset ) alpha_encode_branch( ins, 0x3F, Reg, offset ) - - -/* integer */ -/*//#define alpha_sextl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x00, Rsrc1, Rsrc2, Rdest ) -//#define alpha_sextl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x00, Rsrc1, lit, Rdest ) -*/ -#define alpha_addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x00, Rsrc1, Rsrc2, Rdest ) -#define alpha_addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x00, Rsrc1, lit, Rdest ) -#define alpha_s4addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x02, Rsrc1, Rsrc2, Rdest ) -#define alpha_s4addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x02, Rsrc1, lit, Rdest ) -//#define alpha_negl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x09, Rsrc1, Rsrc2, Rdest ) -//#define alpha_negl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x09, Rsrc1, lit, Rdest ) -#define alpha_subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x09, Rsrc1, Rsrc2, Rdest ) 
-#define alpha_subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x09, Rsrc1, lit, Rdest ) -#define alpha_s4subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x0B, Rsrc1, Rsrc2, Rdest ) -#define alpha_s4subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x0B, Rsrc1, lit, Rdest ) -#define alpha_cmpbge( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x0F, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpbge_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x0F, Rsrc1, lit, Rdest ) -#define alpha_s8addl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x12, Rsrc1, Rsrc2, Rdest ) -#define alpha_s8addl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x12, Rsrc1, lit, Rdest ) -#define alpha_s8subl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x1B, Rsrc1, Rsrc2, Rdest ) -#define alpha_s8subl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x1B, Rsrc1, lit, Rdest ) -#define alpha_cmpult( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x1d, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpult_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x1d, Rsrc1, lit, Rdest ) -#define alpha_addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x20, Rsrc1, Rsrc2, Rdest ) -#define alpha_addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x20, Rsrc1, lit, Rdest ) -#define alpha_s4addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x22, Rsrc1, Rsrc2, Rdest ) -#define alpha_s4addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x22, Rsrc1, lit, Rdest ) -//#define alpha_negq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x29, Rsrc1, Rsrc2, Rdest ) -//#define alpha_negq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x29, Rsrc1, lit, Rdest ) -#define alpha_subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x29, Rsrc1, Rsrc2, Rdest ) -#define alpha_subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x29, Rsrc1, lit, Rdest ) -#define alpha_s4subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x2B, Rsrc1, Rsrc2, Rdest ) -#define alpha_s4subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x2B, Rsrc1, lit, Rdest ) -#define alpha_cmpeq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x2D, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpeq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x2D, Rsrc1, lit, Rdest ) -#define alpha_s8addq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x32, Rsrc1, Rsrc2, Rdest ) -#define alpha_s8addq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x32, Rsrc1, lit, Rdest ) -#define alpha_s8subq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x3B, Rsrc1, Rsrc2, Rdest ) -#define alpha_s8subq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x3B, Rsrc1, lit, Rdest ) -#define alpha_cmpule( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x3D, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpule_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x3D, Rsrc1, lit, Rdest ) -#define alpha_addlv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x40, Rsrc1, Rsrc2, Rdest ) -#define alpha_addlv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x40, Rsrc1, lit, Rdest ) -//#define alpha_neglv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x49, Rsrc1, Rsrc2, Rdest ) -//#define alpha_neglv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x49, Rsrc1, lit, Rdest ) -#define alpha_sublv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x49, Rsrc1, Rsrc2, Rdest 
) -#define alpha_sublv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x49, Rsrc1, lit, Rdest ) -#define alpha_cmplt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x4D, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmplt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x4D, Rsrc1, lit, Rdest ) -#define alpha_addqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x60, Rsrc1, Rsrc2, Rdest ) -#define alpha_addqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x60, Rsrc1, lit, Rdest ) -//#define alpha_negqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x69, Rsrc1, Rsrc2, Rdest ) -//#define alpha_negqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x69, Rsrc1, lit, Rdest ) -#define alpha_subqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x69, Rsrc1, Rsrc2, Rdest ) -#define alpha_subqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x69, Rsrc1, lit, Rdest ) -#define alpha_cmple( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x10, 0x6D, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmple_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x10, 0x6D, Rsrc1, lit, Rdest ) - -#define alpha_and( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x00, Rsrc1, Rsrc2, Rdest ) -#define alpha_and_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x00, Rsrc1, lit, Rdest ) -//#define alpha_andnot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x08, Rsrc1, Rsrc2, Rdest ) -//#define alpha_andnot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x08, Rsrc1, lit, Rdest ) -#define alpha_bic( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x08, Rsrc1, Rsrc2, Rdest ) -#define alpha_bic_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x08, Rsrc1, lit, Rdest ) -#define alpha_cmovlbs( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x14, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovlbs_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x14, Rsrc1, lit, Rdest ) -#define alpha_cmovlbc( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x16, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovlbc_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x16, Rsrc1, lit, Rdest ) -#define alpha_nop( ins ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, alpha_zero, alpha_zero ) -#define alpha_clr( ins, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, alpha_zero, Rdest ) -#define alpha_mov1( ins, Rsrc, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, Rsrc, Rdest ) -#define alpha_mov2( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) -#define alpha_mov_( ins, lit, Rdest ) alpha_encode_op( ins, 0x11, 0x20, alpha_zero, lit, Rdest ) -//#define alpha_or( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) -//#define alpha_or_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x20, Rsrc1, lit, Rdest ) -#define alpha_bis( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x20, Rsrc1, Rsrc2, Rdest ) -#define alpha_bis_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x20, Rsrc1, lit, Rdest ) -#define alpha_cmoveq( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x24, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmoveq_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x24, Rsrc1, lit, Rdest ) -#define alpha_cmovne( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x26, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovne_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x26, Rsrc1, lit, Rdest ) -#define alpha_not( ins, 
Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, alpha_zero, Rsrc2, Rdest ) -#define alpha_not_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, alpha_zero, lit, Rdest ) -#define alpha_ornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x28, Rsrc1, Rsrc2, Rdest ) -#define alpha_ornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x28, Rsrc1, lit, Rdest ) -#define alpha_xor( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x40, Rsrc1, Rsrc2, Rdest ) -#define alpha_xor_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x40, Rsrc1, lit, Rdest ) -#define alpha_cmovlt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x44, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovlt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x44, Rsrc1, lit, Rdest ) -#define alpha_cmovge( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x46, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovge_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x46, Rsrc1, lit, Rdest ) -#define alpha_eqv( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x48, Rsrc1, Rsrc2, Rdest ) -#define alpha_eqv_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) -//#define alpha_xornot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x48, Rsrc1, Rsrc2, Rdest ) -//#define alpha_xornot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x48, Rsrc1, lit, Rdest ) -#define alpha_ev56b_amask( ins, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x61, alpha_zero, Rsrc2, Rdest ) -#define alpha_ev56b_amask_( ins, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x61, alpha_zero, lit, Rdest ) -#define alpha_cmovle( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x64, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovle_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x64, Rsrc1, lit, Rdest ) -#define alpha_cmovgt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x66, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovgt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x66, Rsrc1, lit, Rdest ) -//#define alpha_implver_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x6C, Rsrc1, lit, Rdest ) -#define alpha_cmovgt( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x11, 0x66, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmovgt_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x11, 0x66, Rsrc1, lit, Rdest ) - -#define alpha_mskbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x02, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x02, Rsrc1, lit, Rdest ) -#define alpha_extbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x06, Rsrc1, Rsrc2, Rdest ) -#define alpha_extbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x06, Rsrc1, lit, Rdest ) -#define alpha_insbl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x0B, Rsrc1, Rsrc2, Rdest ) -#define alpha_insbl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x0B, Rsrc1, lit, Rdest ) -#define alpha_mskwl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x12, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskwl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x12, Rsrc1, lit, Rdest ) -#define alpha_extwl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x16, Rsrc1, Rsrc2, Rdest ) -#define alpha_extwl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x16, Rsrc1, lit, Rdest ) -#define alpha_inswl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x1b, Rsrc1, Rsrc2, Rdest ) -#define alpha_inswl_( ins, 
Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x1b, Rsrc1, lit, Rdest ) -#define alpha_mskll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x22, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x22, Rsrc1, lit, Rdest ) -#define alpha_extll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x26, Rsrc1, Rsrc2, Rdest ) -#define alpha_extll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x26, Rsrc1, lit, Rdest ) -#define alpha_insll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x2b, Rsrc1, Rsrc2, Rdest ) -#define alpha_insll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x2b, Rsrc1, lit, Rdest ) -#define alpha_zap( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x30, Rsrc1, Rsrc2, Rdest ) -#define alpha_zap_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x30, Rsrc1, lit, Rdest ) -#define alpha_zapnot( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x31, Rsrc1, Rsrc2, Rdest ) -#define alpha_zapnot_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x31, Rsrc1, lit, Rdest ) -#define alpha_mskql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x32, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x32, Rsrc1, lit, Rdest ) -#define alpha_srl( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x34, Rsrc1, Rsrc2, Rdest ) -#define alpha_srl_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x34, Rsrc1, lit, Rdest ) -#define alpha_extql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x36, Rsrc1, Rsrc2, Rdest ) -#define alpha_extql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x36, Rsrc1, lit, Rdest ) -#define alpha_sll( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x39, Rsrc1, Rsrc2, Rdest ) -#define alpha_sll_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x39, Rsrc1, lit, Rdest ) -#define alpha_insql( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x3b, Rsrc1, Rsrc2, Rdest ) -#define alpha_insql_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x3b, Rsrc1, lit, Rdest ) -#define alpha_sra( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x3c, Rsrc1, Rsrc2, Rdest ) -#define alpha_sra_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x3c, Rsrc1, lit, Rdest ) -#define alpha_mskwh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x52, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskwh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x52, Rsrc1, lit, Rdest ) -#define alpha_inswh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x57, Rsrc1, Rsrc2, Rdest ) -#define alpha_inswh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x57, Rsrc1, lit, Rdest ) -#define alpha_extwh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x5a, Rsrc1, Rsrc2, Rdest ) -#define alpha_extwh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x5a, Rsrc1, lit, Rdest ) -#define alpha_msklh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x62, Rsrc1, Rsrc2, Rdest ) -#define alpha_msklh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x62, Rsrc1, lit, Rdest ) -#define alpha_inslh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x67, Rsrc1, Rsrc2, Rdest ) -#define alpha_inslh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x67, Rsrc1, lit, Rdest ) -#define alpha_extlh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x6a, Rsrc1, Rsrc2, Rdest ) -#define alpha_extlh_( ins, Rsrc1, lit, Rdest ) 
alpha_encode_opl( ins, 0x12, 0x6a, Rsrc1, lit, Rdest ) -#define alpha_mskqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x72, Rsrc1, Rsrc2, Rdest ) -#define alpha_mskqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x72, Rsrc1, lit, Rdest ) -#define alpha_insqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x77, Rsrc1, Rsrc2, Rdest ) -#define alpha_insqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x77, Rsrc1, lit, Rdest ) -#define alpha_extqh( ins, Rsrc1, Rsrc2, Rdest ) alpha_encode_op( ins, 0x12, 0x7a, Rsrc1, Rsrc2, Rdest ) -#define alpha_extqh_( ins, Rsrc1, lit, Rdest ) alpha_encode_opl( ins, 0x12, 0x7a, Rsrc1, lit, Rdest ) - -#define alpha_mull(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_op( ins, 0x13, 0x00, Rsrc1, Rsrc2, Rdest ) -#define alpha_mull_(ins, Rsrc1, lit, Rdest) alpha_encode_op( ins, 0x13, 0x00, Rsrc1, lit, Rdest ) -#define alpha_mulq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, Rsrc2, Rdest ) -#define alpha_mulq_(ins, Rsrc1, lit, Rdest) alpha_encode_op( ins, 0x13, 0x20, Rsrc1, lit, Rdest ) - -#define alpha_sextb(ins, Rsrc2, Rdest) alpha_encode_op( ins, 0x1c, 0x00, alpha_zero, Rsrc2, Rdest ) -#define alpha_sextw(ins, Rsrc2, Rdest) alpha_encode_op( ins, 0x1c, 0x01, alpha_zero, Rsrc2, Rdest ) - -// For 264 -#define alpha_ftois( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x078, RFsrc, alpha_zero, Rdest ) -#define alpha_ftoit( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) -#define alpha_ftoi_qf( ins, RFsrc, Rdest ) alpha_encode_fpop( ins, 0x1c, 0x070, RFsrc, alpha_zero, Rdest ) -// For 264 -#define alpha_itofs( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x004, Rsrc, alpha_zero, RFdest ) -#define alpha_itoff( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x014, Rsrc, alpha_zero, RFdest ) -#define alpha_itoft( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) -#define alpha_itof_qf( ins, Rsrc, RFdest ) alpha_encode_fpop( ins, 0x14, 0x024, Rsrc, alpha_zero, RFdest ) - -#define alpha_cvtts_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x2C, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvttq_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x2F, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvtqs_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x3C, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvtqt_c(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x3E, alpha_fzero, Rsrc2, Rdest ) - - -#define alpha_adds(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x080, Rsrc1, Rsrc2, Rdest ) -#define alpha_subs(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x081, Rsrc1, Rsrc2, Rdest ) -#define alpha_addt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A0, Rsrc1, Rsrc2, Rdest ) -#define alpha_subt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A1, Rsrc1, Rsrc2, Rdest ) -#define alpha_mult(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A2, Rsrc1, Rsrc2, Rdest ) -#define alpha_divt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A3, Rsrc1, Rsrc2, Rdest ) - -#define alpha_cmptun(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A4, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpteq(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A5, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmptlt(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A6, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmptle(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0A7, Rsrc1, Rsrc2, Rdest ) - 
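The floating-point operate instructions in this group use the same word
layout, except that alpha_fp_func places an 11-bit function code in bits
15..5 instead of the 7-bit integer function code. Expanding alpha_adds( p,
alpha_fa0, alpha_fa1, alpha_fv0 ) by hand, a sketch derived only from the
macros above:

	/* opcode 0x16 | ra = fa0 (16) | rb = fa1 (17) | func 0x080 | rc = fv0 (0) */
	unsigned int w = (0x16 << 26) | (16 << 21) | (17 << 16) | (0x080 << 5) | 0;
	/* w == 0x5a111000, i.e. adds $f16,$f17,$f0 */
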
-#define alpha_addt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A0, Rsrc1, Rsrc2, Rdest ) -#define alpha_subt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A1, Rsrc1, Rsrc2, Rdest ) - - -#define alpha_cmptun_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A4, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmpteq_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A5, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmptlt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A6, Rsrc1, Rsrc2, Rdest ) -#define alpha_cmptle_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A7, Rsrc1, Rsrc2, Rdest ) - -#define alpha_cvtts(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AC, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvttq(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0AF, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvtqs(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BC, alpha_fzero, Rsrc2, Rdest ) -#define alpha_cvtqt(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x0BE, alpha_fzero, Rsrc2, Rdest ) - -#define alpha_divt_su(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5A3, Rsrc1, Rsrc2, Rdest ) - -#define alpha_cvtts_su(ins, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x16, 0x5AC, alpha_fzero, Rsrc2, Rdest ) - -#define alpha_cpys(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x020, Rsrc1, Rsrc2, Rdest ) -#define alpha_cpysn(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x021, Rsrc1, Rsrc2, Rdest ) -#define alpha_cpyse(ins, Rsrc1, Rsrc2, Rdest) alpha_encode_fpop( ins, 0x17, 0x022, Rsrc1, Rsrc2, Rdest ) - -#define alpha_trapb(ins) alpha_encode_mem_fc( ins, 0x18, 0x0000, 0, 0, 0 ) -#define alpha_mb(ins) alpha_encode_mem_fc( ins, 0x18, 0x4000, 0, 0, 0 ) - -#endif - diff --git a/alpha/test.c b/alpha/test.c deleted file mode 100644 index b922750..0000000 --- a/alpha/test.c +++ /dev/null @@ -1,156 +0,0 @@ -#include "alpha-codegen.h" - -#include -#include -#include -#include -#include -#include -#include - -/* A typical Alpha stack frame looks like this */ -/* -fun: // called from outside the module. - ldgp gp,0(pv) // load the global pointer -fun..ng: // called from inside the module. - lda sp, -SIZE( sp ) // grow the stack downwards. - - stq ra, 0(sp) // save the return address. - - stq s0, 8(sp) // callee-saved registers. - stq s1, 16(sp) // ... - - // Move the arguments to the argument registers... - - mov addr, pv // Load the callee address - jsr ra, (pv) // call the method. - ldgp gp, 0(ra) // restore gp - - // return value is in v0 - - ldq ra, 0(sp) // free stack frame - ldq s0, 8(sp) // restore callee-saved registers. - ldq s1, 16(sp) - ldq sp, 32(sp) // restore stack pointer - - ret zero, (ra), 1 // return. -*/ - - - -// -// Simple function which returns 10. -// -static int testfunc(void) -{ - return 10; -} - -// Write it using the known asm bytecodes. 
-static unsigned int * write_testfunc_1(unsigned int * p ) -{ -// -// ldah gp, 0(pv) -// lda gp, 0(gp) -//00000001200004d0 : -// 1200004d0: f0 ff de 23 lda sp,-16(sp) -// 1200004d4: 00 00 5e b7 stq ra,0(sp) -// 1200004d8: 08 00 fe b5 stq fp,8(sp) -// 1200004dc: 0f 04 fe 47 mov sp,fp -// 1200004e0: 0a 00 3f 20 lda t0,10 -// 1200004e4: 00 04 e1 47 mov t0,v0 -// 1200004e8: 1e 04 ef 47 mov fp,sp -// 1200004ec: 00 00 5e a7 ldq ra,0(sp) -// 1200004f0: 08 00 fe a5 ldq fp,8(sp) -// 1200004f4: 10 00 de 23 lda sp,16(sp) -// 1200004f8: 01 80 fa 6b ret - -int _func_code[] = { - 0x23defff0, - 0xb75e0000, - 0xb5fe0008, - 0x47fe040f, - 0x203f000a, - 0x47e10400, - 0x47ef041e, - 0xa75e0000, - 0xa5fe0008, - 0x23de0010, - 0x6bfa8001 }; - - memcpy( p , _func_code, 4 * 11 ); - return p + ( 4 * 11 ); -} - -// The same function encoded with alpha-codegen.h -unsigned int * write_testfunc_2( unsigned int * p ) -{ - alpha_ldah( p, alpha_gp, alpha_pv, 0 ); // start the gp load - alpha_lda( p, alpha_sp, alpha_sp, -16 ); // allocate the stack - alpha_lda( p, alpha_gp, alpha_gp, 0 ); // finish the gp load - alpha_stq( p, alpha_ra, alpha_sp, 0 ); // start param save. - alpha_stq( p, alpha_fp, alpha_sp, 8 ); - alpha_mov1( p, alpha_sp, alpha_fp ); - alpha_lda( p, alpha_t0, alpha_zero, 10 ); - alpha_mov1( p, alpha_t0, alpha_v0 ); - alpha_mov1( p, alpha_fp, alpha_sp ); - alpha_ldq( p, alpha_ra, alpha_sp, 0 ); - alpha_ldq( p, alpha_fp, alpha_sp, 8 ); - alpha_lda( p, alpha_sp, alpha_sp, 16 ); - - alpha_ret( p, alpha_ra, 1 ); - - return p; -} - - -void output( char * p, int len ) -{ - char * maxp = p + len; - char * cp = p; - - printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); - for ( ; cp < maxp; cp++ ) - { - printf (".byte 0x%0.2x\n", (*cp&0x00ff) ); - } - - int fd = open( "bad.out", O_CREAT | O_TRUNC ); - write( fd, p, len ); - close( fd ); -} - -unsigned int code [16000/4]; - -int main( int argc, char ** argv ) { -// unsigned int code [16000/4]; - unsigned int *p = code; - unsigned int * cp; - - int (*x)() = 0; - int y = 0; - int z = 10; - - // so, `test blah` gets you the byte-encoded function. - // and `test` gets you the alpha-codegen.h encoded function. - - if( argc > 1 ) - { - p = write_testfunc_1( p ); - } - else - { - p = write_testfunc_2( p ); - } - - // output( code, p-code ); - - // call the procedure. - x = (int(*)())code; - - while( z-- > 0 ) - y = x(); - - return 0; -} - diff --git a/alpha/tramp.c b/alpha/tramp.c deleted file mode 100644 index 23c3846..0000000 --- a/alpha/tramp.c +++ /dev/null @@ -1,380 +0,0 @@ -/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ -/* - * Create trampolines to invoke arbitrary functions. - * - * Copyright (C) Ximian Inc. - * - * Authors: Laramie Leavitt (lar@leavitt.us) - * - * - */ - -/* A typical Alpha stack frame looks like this */ -/* -fun: // called from outside the module. - ldgp gp,0(pv) // load the global pointer -fun..ng: // called from inside the module. - lda sp, -SIZE( sp ) // grow the stack downwards. - - stq ra, 0(sp) // save the return address. - - stq s0, 8(sp) // callee-saved registers. - stq s1, 16(sp) // ... - - // Move the arguments to the argument registers... - - mov addr, pv // Load the callee address - jsr ra, (pv) // call the method. - ldgp gp, 0(ra) // restore gp - - // return value is in v0 - - ldq ra, 0(sp) // free stack frame - ldq s0, 8(sp) // restore callee-saved registers. - ldq s1, 16(sp) - ldq sp, 32(sp) // restore stack pointer - - ret zero, (ra), 1 // return. 
- -// min SIZE = 48 -// our call must look like this. - -call_func: - ldgp gp, 0(pv) -call_func..ng: - .prologue - lda sp, -SIZE(sp) // grow stack SIZE bytes. - stq ra, SIZE-48(sp) // store ra - stq fp, SIZE-40(sp) // store fp (frame pointer) - stq a0, SIZE-32(sp) // store args. a0 = func - stq a1, SIZE-24(sp) // a1 = retval - stq a2, SIZE-16(sp) // a2 = this - stq a3, SIZE-8(sp) // a3 = args - mov sp, fp // set frame pointer - mov pv, a0 // func - - .calling_arg_this - mov a1, a2 - - .calling_arg_6plus - ldq t0, POS(a3) - stq t0, 0(sp) - ldq t1, POS(a3) - stq t1, 8(sp) - ... SIZE-56 ... - - mov zero,a1 - mov zero,a2 - mov zero,a3 - mov zero,a4 - mov zero,a5 - - .do_call - jsr ra, (pv) // call func - ldgp gp, 0(ra) // restore gp. - mov v0, t1 // move return value into t1 - - .do_store_retval - ldq t0, SIZE-24(fp) // load retval into t2 - stl t1, 0(t0) // store value. - - .finished - mov fp,sp - ldq ra,SIZE-48(sp) - ldq fp,SIZE-40(sp) - lda sp,SIZE(sp) - ret zero,(ra),1 - - -*/ -/*****************************************************/ - -#include "config.h" -#include -#include - -#include "alpha-codegen.h" - -#include "mono/metadata/class.h" -#include "mono/metadata/tabledefs.h" -#include "mono/interpreter/interp.h" -#include "mono/metadata/appdomain.h" -#include "mono/metadata/debug-helpers.h" - -#define AXP_GENERAL_REGS 6 -#define AXP_MIN_STACK_SIZE 24 -#define ARG_SIZE sizeof(stackval) -#define ARG_LOC(x) (x * sizeof( stackval ) ) - -/*****************************************************/ - -/* */ -/* void func (void (*callme)(), void *retval, */ -/* void *this_obj, stackval *arguments); */ -static inline unsigned int * -emit_prolog (unsigned int *pi, const gint SIZE, int hasthis ) -{ - unsigned int *p = (unsigned int *)pi; - // 9 instructions. - alpha_ldah( p, alpha_gp, alpha_pv, 0 ); - alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(pv) - alpha_lda( p, alpha_sp, alpha_sp, -((SIZE & 8) ? (SIZE+8) : SIZE) ); // grow stack down SIZE (align to 16 bytes like gcc does) - - /* TODO: we really don't need to store everything. - alpha_a1: We have to store this in order to return the retval. - - alpha_a0: func pointer can be moved directly to alpha_pv - alpha_a3: don't need args after we are finished. - alpha_a2: will be moved into alpha_a0... if hasthis is true. - */ - /* store parameters on stack.*/ - alpha_stq( p, alpha_ra, alpha_sp, (SIZE-24) ); // ra - alpha_stq( p, alpha_fp, alpha_sp, (SIZE-16) ); // fp - alpha_stq( p, alpha_a1, alpha_sp, (SIZE-8) ); // retval - - /* set the frame pointer */ - alpha_mov1( p, alpha_sp, alpha_fp ); - - /* move the args into t0, pv */ - alpha_mov1( p, alpha_a0, alpha_pv ); - alpha_mov1( p, alpha_a3, alpha_t0 ); - - // Move the this pointer into a0. - if( hasthis ) - alpha_mov1( p, alpha_a2, alpha_a0 ); - return p; -} - -static inline unsigned int * -emit_call( unsigned int *pi , const gint SIZE ) -{ - unsigned int *p = (unsigned int *)pi; - - // 3 instructions - /* call func */ - alpha_jsr( p, alpha_ra, alpha_pv, 0 ); // jsr ra, 0(pv) - - /* reload the gp */ - alpha_ldah( p, alpha_gp, alpha_ra, 0 ); - alpha_lda( p, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(ra) - - return p; -} - -static inline unsigned int * -emit_store_return_default(unsigned int *pi, const gint SIZE ) -{ - // 2 instructions. - unsigned int *p = (unsigned int *)pi; - - /* TODO: This probably do different stuff based on the value. - you know, like stq/l/w. and s/f. 
- */ - alpha_ldq( p, alpha_t0, alpha_fp, (SIZE-8) ); // load void * retval - alpha_stq( p, alpha_v0, alpha_t0, 0 ); // store the result to *retval. - return p; -} - - -static inline unsigned int * -emit_epilog (unsigned int *pi, const gint SIZE ) -{ - unsigned int *p = (unsigned int *)pi; - - // 5 instructions. - alpha_mov1( p, alpha_fp, alpha_sp ); - - /* restore fp, ra, sp */ - alpha_ldq( p, alpha_ra, alpha_sp, (SIZE-24) ); - alpha_ldq( p, alpha_fp, alpha_sp, (SIZE-16) ); - alpha_lda( p, alpha_sp, alpha_sp, ((SIZE & 8) ? (SIZE+8) : SIZE) ); - - /* return */ - alpha_ret( p, alpha_ra, 1 ); - return p; -} - -static void calculate_size(MonoMethodSignature *sig, int * INSTRUCTIONS, int * STACK ) -{ - int alpharegs; - - alpharegs = AXP_GENERAL_REGS - (sig->hasthis?1:0); - - *STACK = AXP_MIN_STACK_SIZE; - *INSTRUCTIONS = 20; // Base: 20 instructions. - - if( sig->param_count - alpharegs > 0 ) - { - *STACK += ARG_SIZE * (sig->param_count - alpharegs ); - // plus 3 (potential) for each stack parameter. - *INSTRUCTIONS += ( sig->param_count - alpharegs ) * 3; - // plus 2 (potential) for each register parameter. - *INSTRUCTIONS += ( alpharegs * 2 ); - } - else - { - // plus 2 (potential) for each register parameter. - *INSTRUCTIONS += ( sig->param_count * 2 ); - } -} - -MonoPIFunc -mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -{ - unsigned int *p; - unsigned int *buffer; - MonoType* param; - - int i, pos; - int alpharegs; - int hasthis; - int STACK_SIZE; - int BUFFER_SIZE; - int simple_type; - int regbase; - - // Set up basic stuff. like has this. - hasthis = !!sig->hasthis; - alpharegs = AXP_GENERAL_REGS - hasthis; - regbase = hasthis?alpha_a1:alpha_a0 ; - - // Make a ballpark estimate for now. - calculate_size( sig, &BUFFER_SIZE, &STACK_SIZE ); - - // convert to the correct number of bytes. - BUFFER_SIZE = BUFFER_SIZE * 4; - - - // allocate. - buffer = p = (unsigned int *)malloc(BUFFER_SIZE); - memset( buffer, 0, BUFFER_SIZE ); - pos = 8 * (sig->param_count - alpharegs - 1); - - // Ok, start creating this thing. 
- p = emit_prolog( p, STACK_SIZE, hasthis ); - - // copy everything into the correct register/stack space - for (i = sig->param_count; --i >= 0; ) - { - param = sig->params [i]; - - if( param->byref ) - { - if( i >= alpharegs ) - { - // load into temp register, then store on the stack - alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i )); - alpha_stq( p, alpha_t1, alpha_sp, pos ); - pos -= 8; - } - else - { - // load into register - alpha_ldq( p, (regbase + i), alpha_t0, ARG_LOC( i ) ); - } - } - else - { - simple_type = param->type; - if( simple_type == MONO_TYPE_VALUETYPE ) - { - if (param->data.klass->enumtype) - simple_type = param->data.klass->enum_basetype->type; - } - - switch (simple_type) - { - case MONO_TYPE_VOID: - break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - // 4 bytes - need to sign-extend (stackvals are not extended) - if( i >= alpharegs ) - { - // load into temp register, then store on the stack - alpha_ldl( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); - alpha_stq( p, alpha_t1, alpha_sp, pos ); - pos -= 8; - } - else - { - // load into register - alpha_ldl( p, (regbase + i), alpha_t0, (ARG_LOC(i)) ); - } - break; - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - // 8 bytes - if( i >= alpharegs ) - { - // load into temp register, then store on the stack - alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); - alpha_stq( p, alpha_t1, alpha_sp, pos ); - pos -= 8; - } - else - { - // load into register - alpha_ldq( p, (regbase + i), alpha_t0, ARG_LOC(i) ); - } - break; - case MONO_TYPE_R4: - case MONO_TYPE_R8: - /* - // floating point... Maybe this does the correct thing. - if( i > alpharegs ) - { - alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC( i ) ); - alpha_cpys( p, alpha_ft1, alpha_ft1, alpha_ft2 ); - alpha_stt( p, alpha_ft2, alpha_sp, pos ); - pos -= 8; - } - else - { - alpha_ldq( p, alpha_t1, alpha_t0, ARG_LOC(i) ); - alpha_cpys( p, alpha_ft1, alpha_ft1, alpha_fa0 + i + hasthis ); - } - break; - */ - case MONO_TYPE_VALUETYPE: - g_error ("Not implemented: ValueType as parameter to delegate." ); - break; - default: - g_error( "Not implemented: 0x%x.", simple_type ); - break; - } - } - } - - // Now call the function and store the return parameter. 
- p = emit_call( p, STACK_SIZE ); - p = emit_store_return_default( p, STACK_SIZE ); - p = emit_epilog( p, STACK_SIZE ); - - if( p > buffer + BUFFER_SIZE ) - g_error( "Buffer overflow: got 0x%lx, expected <=0x%x.", (long)(p-buffer), BUFFER_SIZE ); - - /* flush instruction cache to see trampoline code */ - asm volatile("imb":::"memory"); - - return (MonoPIFunc)buffer; -} - -void * -mono_arch_create_method_pointer (MonoMethod *method) -{ - g_error ("Unsupported arch"); - return NULL; -} -- cgit v1.1 From 0d9d79945bfc7e791ed39e7519b8769a3c09fe28 Mon Sep 17 00:00:00 2001 From: Elijah Taylor Date: Thu, 31 Jan 2013 12:48:49 -0800 Subject: NaCl GC improvements - inline managed code implementation (add x86 test mem imm8 codegen macro for this as well) - clean up libgc NaCl code - centralize mono_nacl_gc into mini.c --- x86/x86-codegen.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index fd2c528..ced466e 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -814,6 +814,14 @@ typedef union { x86_imm_emit32 ((inst), (imm)); \ } while (0) +#define x86_test_mem_imm8(inst,mem,imm) \ + do { \ + x86_codegen_pre(&(inst), 7); \ + *(inst)++ = (unsigned char)0xf6; \ + x86_mem_emit ((inst), 0, (mem)); \ + x86_imm_emit8 ((inst), (imm)); \ + } while (0) + #define x86_test_mem_imm(inst,mem,imm) \ do { \ x86_codegen_pre(&(inst), 10); \ -- cgit v1.1 From 92b3dc346aad94e7e6a91e7356adcebbb180c618 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Mon, 22 Apr 2013 17:54:27 +0200 Subject: Remove obsolete 32 bit s390 backend. --- Makefile.am | 2 +- s390/.gitignore | 4 - s390/ChangeLog | 19 - s390/Makefile.am | 7 - s390/s390-codegen.h | 696 ------------------------------- s390/tramp.c | 1154 --------------------------------------------------- 6 files changed, 1 insertion(+), 1881 deletions(-) delete mode 100644 s390/.gitignore delete mode 100644 s390/ChangeLog delete mode 100644 s390/Makefile.am delete mode 100644 s390/s390-codegen.h delete mode 100644 s390/tramp.c diff --git a/Makefile.am b/Makefile.am index 0bedf77..e7700ed 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -DIST_SUBDIRS = x86 ppc sparc arm s390 s390x amd64 ia64 mips +DIST_SUBDIRS = x86 ppc sparc arm s390x amd64 ia64 mips AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/s390/.gitignore b/s390/.gitignore deleted file mode 100644 index 6abcd22..0000000 --- a/s390/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/Makefile.in -/Makefile -/.deps -/.cvsignore diff --git a/s390/ChangeLog b/s390/ChangeLog deleted file mode 100644 index 9b41109..0000000 --- a/s390/ChangeLog +++ /dev/null @@ -1,19 +0,0 @@ -2007-04-12 Neale Ferguson - - * tramp.c: Add MONO_TYPE_PTR case. - -2005-12-13 Neale Ferguson - - * s390-codegen.h: Add some new instructions (conditional jumps) - -2004-12-15 Neale Ferguson - - * s390-codegen.h: Add some new instructions (CS, CDS) - -2004-11-15 Neale Ferguson - - * s390-codegen.h: Minor macro modifications - -2004-07-30 Neale Ferguson - - * s390-codegen.h: reworked macros for code generation. 
diff --git a/s390/Makefile.am b/s390/Makefile.am deleted file mode 100644 index d8ebb6f..0000000 --- a/s390/Makefile.am +++ /dev/null @@ -1,7 +0,0 @@ - -AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-s390.la - -libmonoarch_s390_la_SOURCES = tramp.c s390-codegen.h - diff --git a/s390/s390-codegen.h b/s390/s390-codegen.h deleted file mode 100644 index 6457357..0000000 --- a/s390/s390-codegen.h +++ /dev/null @@ -1,696 +0,0 @@ -/* - Copyright (C) 2001 Radek Doulik -*/ - -#ifndef S390_H -#define S390_H -#include -#include - -#define FLOAT_REGS 2 /* No. float registers for parms */ -#define GENERAL_REGS 5 /* No. general registers for parms */ - -#define ARG_BASE s390_r10 /* Register for addressing arguments*/ -#define STKARG \ - (i*(sizeof(stackval))) /* Displacement of ith argument */ - -#define MINV_POS 96 /* MonoInvocation stack offset */ -#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) -#define OBJ_POS 8 -#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) - -#define MIN_CACHE_LINE 256 - -/*------------------------------------------------------------------*/ -/* Sequence to add an int/long long to parameters to stack_from_data*/ -/*------------------------------------------------------------------*/ -#define ADD_ISTACK_PARM(r, i) \ - if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param += (i); \ - } else { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ - stack_param += (i); \ - } - -/*------------------------------------------------------------------*/ -/* Sequence to add a float/double to parameters to stack_from_data */ -/*------------------------------------------------------------------*/ -#define ADD_RSTACK_PARM(i) \ - if (fpr_param < FLOAT_REGS) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - float_pos + (fpr_param * sizeof(float) * (i))); \ - fpr_param++; \ - } else { \ - stack_param += (stack_param % (i)); \ - s390_la (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \ - stack_param += (i); \ - } - -/*------------------------------------------------------------------*/ -/* Sequence to add a structure ptr to parameters to stack_from_data */ -/*------------------------------------------------------------------*/ -#define ADD_TSTACK_PARM \ - if (reg_param < GENERAL_REGS) { \ - s390_l (p, s390_r4, 0, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param++; \ - } else { \ - s390_l (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ - stack_param++; \ - } - -#define ADD_PSTACK_PARM(r, i) \ - if (reg_param < GENERAL_REGS-(r)) { \ - s390_la (p, s390_r4, 0, STK_BASE, \ - local_start + (reg_param - this_flag) * sizeof(long)); \ - reg_param += (i); \ - } else { \ - s390_l (p, s390_r4, 0, STK_BASE, \ - sz.stack_size + MINV_POS + stack_param * sizeof(long)); \ - stack_param++; \ - } -typedef enum { - s390_r0 = 0, - s390_r1, - s390_r2, - s390_r3, - s390_r4, - s390_r5, - s390_r6, - s390_r7, - s390_r8, - s390_r9, - s390_r10, - s390_r11, - s390_r12, - s390_r13, - s390_r14, - s390_r15, -} S390IntRegister; - -typedef enum { - s390_f0 = 0, - s390_f1, - s390_f2, - s390_f3, - s390_f4, - s390_f5, - s390_f6, - s390_f7, - s390_f8, - s390_f9, - s390_f10, - s390_f11, - s390_f12, - s390_f13, - s390_f14, - s390_f15, -} S390FloatRegister; - -typedef enum { - s390_a0 = 0, - s390_a1, - s390_a2, - s390_a3, - 
s390_a4, - s390_a5, - s390_a6, - s390_a7, - s390_a8, - s390_a9, - s390_a10, - s390_a11, - s390_a12, - s390_a13, - s390_a14, - s390_a15, -} S390AccRegister; - -typedef enum { - s390_fpc = 256, -} S390SpecialRegister; - -#define s390_is_imm16(val) ((gint)val >= (gint)-(1<<15) && \ - (gint)val <= (gint)((1<<15)-1)) -#define s390_is_uimm16(val) ((gint)val >= 0 && (gint)val <= 32767) -#define s390_is_imm12(val) ((gint)val >= (gint)-(1<<11) && \ - (gint)val <= (gint)((1<<11)-1)) -#define s390_is_uimm12(val) ((gint)val >= 0 && (gint)val <= 4095) - -#define STK_BASE s390_r15 -#define S390_MINIMAL_STACK_SIZE 96 -#define S390_PARM_SAVE_OFFSET 8 -#define S390_REG_SAVE_OFFSET 24 -#define S390_RET_ADDR_OFFSET 56 -#define S390_FLOAT_SAVE_OFFSET 64 - -#define S390_CC_ZR 8 -#define S390_CC_NE 7 -#define S390_CC_NZ 7 -#define S390_CC_LT 4 -#define S390_CC_GT 2 -#define S390_CC_GE 11 -#define S390_CC_NM 11 -#define S390_CC_LE 13 -#define S390_CC_OV 1 -#define S390_CC_NO 14 -#define S390_CC_CY 3 -#define S390_CC_NC 12 -#define S390_CC_UN 15 - -#define s390_word(addr, value) do \ -{ \ - * (guint32 *) addr = (guint32) value; \ - addr += sizeof(guint32); \ -} while (0) - -#define s390_float(addr, value) do \ -{ \ - * (gfloat *) addr = (gfloat) value; \ - addr += sizeof(gfloat); \ -} while (0) - -#define s390_llong(addr, value) do \ -{ \ - * (guint64 *) addr = (guint64) value; \ - addr += sizeof(guint64); \ -} while (0) - -#define s390_double(addr, value) do \ -{ \ - * (gdouble *) addr = (gdouble) value; \ - addr += sizeof(gdouble); \ -} while (0) - -typedef struct { - short op; -} E_Format; - -typedef struct { - char op; - int im; -} I_Format; - -typedef struct { - char op; - char r1 : 4; - char r2 : 4; -} RR_Format; - -typedef struct { - short op; - char xx; - char r1 : 4; - char r2 : 4; -} RRE_Format; - -typedef struct { - short op; - char r1 : 4; - char xx : 4; - char r3 : 4; - char r2 : 4; -} RRF_Format_1; - -typedef struct { - short op; - char m3 : 4; - char xx : 4; - char r1 : 4; - char r2 : 4; -} RRF_Format_2; - -typedef struct { - short op; - char r3 : 4; - char m4 : 4; - char r1 : 4; - char r2 : 4; -} RRF_Format_3; - -typedef struct { - char op; - char r1 : 4; - char x2 : 4; - char b2 : 4; - short d2 : 12; -} RX_Format; - -typedef struct { - char op1; - char r1 : 4; - char x2 : 4; - char b2 : 4; - int d2 : 12; - char xx; - char op2; -} RXE_Format; - -typedef struct { - char op1; - char r3 : 4; - char x2 : 4; - char b2 : 4; - int d2 : 12; - char r1 : 4; - char xx : 4; - char op2; -} RXF_Format; - -typedef struct __attribute__ ((packed)) { - char op1; - char r1 : 4; - char x2 : 4; - char b2 : 4; - int d2 : 20; - char op2; -} RXY_Format; - -typedef struct { - char op; - char r1 : 4; - char r3 : 4; - char b2 : 4; - int d2 : 12; -} RS_Format_1; - -typedef struct { - char op; - char r1 : 4; - char m3 : 4; - char b2 : 4; - int d2 : 12; -} RS_Format_2; - -typedef struct { - char op; - char r1 : 4; - char xx : 4; - char b2 : 4; - int d2 : 12; -} RS_Format_3; - -typedef struct __attribute__ ((packed)) { - char op1; - char r1 : 4; - char r3 : 4; - char b2 : 4; - int d2 : 20; - char op2; -} RSY_Format_1; - -typedef struct __attribute__ ((packed)) { - char op1; - char r1 : 4; - char m3 : 4; - char b2 : 4; - int d2 : 20; - char op2; -} RSY_Format_2; - -typedef struct { - char op1; - char l1 : 4; - char xx : 4; - char b1 : 4; - int d1 : 12; - char yy; - char op2; -} RSL_Format; - -typedef struct { - char op; - char r1 : 4; - char r3 : 4; - short i2; -} RSI_Format; - -typedef struct { - char op1; - char r1 : 4; - char 
op2 : 4; - short i2; -} RI_Format; - -typedef struct { - char op1; - char r1 : 4; - char r3 : 4; - short i2; - char xx; - char op2; -} RIE_Format; - -typedef struct __attribute__ ((packed)) { - char op1; - char r1 : 4; - char op2 : 4; - int i2; -} RIL_Format_1; - -typedef struct __attribute__ ((packed)) { - char op1; - char m1 : 4; - char op2 : 4; - int i2; -} RIL_Format_2; - -typedef struct { - char op; - char i2; - char b1 : 4; - short d1 : 12; -} SI_Format; - -typedef struct __attribute__ ((packed)) { - char op1; - char i2; - char b1 : 4; - int d1 : 20; - char op2; -} SIY_Format; - -typedef struct { - short op; - char b2 : 4; - short d2 : 12; -} S_Format; - -typedef struct { - char op; - char ll; - char b1 : 4; - short d1 : 12; - char b2 : 4; - short d2 : 12; -} SS_Format_1; - -typedef struct { - char op; - char l1 : 4; - char l2 : 4; - char b1 : 4; - short d1 : 12; - char b2 : 4; - short d2 : 12; -} SS_Format_2; - -typedef struct { - char op; - char r1 : 4; - char r3 : 4; - char b1 : 4; - short d1 : 12; - char b2 : 4; - short d2 : 12; -} SS_Format_3; - -typedef struct { - char op; - char r1 : 4; - char r3 : 4; - char b2 : 4; - short d2 : 12; - char b4 : 4; - short d4 : 12; -} SS_Format_4; - -typedef struct __attribute__ ((packed)) { - short op; - char b1 : 4; - short d1 : 12; - char b2 : 4; - short d2 : 12; -} SSE_Format; - -#define s390_emit16(c, x) do \ -{ \ - *((guint16 *) c) = x; \ - c += sizeof(guint16); \ -} while(0) - -#define s390_emit32(c, x) do \ -{ \ - *((guint32 *) c) = x; \ - c += sizeof(guint32); \ -} while(0) - -#define S390_E(c,opc) s390_emit16(c,opc) - -#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm)) - -#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2)) - -#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2)) - -#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2)) - -#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2)) - -#define S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2)) - -#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff))) - -#define S390_RXE(c,opc,g1,n2,s2,p2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ - s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \ - (opc & 0xff))); \ -} while (0) - -#define S390_RXY(c,opc,g1,n2,s2,p2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \ - s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ - (opc & 0xff))); \ -} while (0) - -#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff))) - -#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff))) - -#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff))) - -#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ - s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ - (opc & 0xff))); \ -} while (0) - -#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \ - s390_emit32(c, ((s2) << 28 | (((p2) & 0xfffff) << 8) | \ - (opc & 0xff))); \ -} while (0) - -#define S390_RSL(c,opc,ln,s1,p1) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (ln) << 4)); \ - s390_emit32(c, ((s1) << 28 | ((s1 & 0xfff) << 16) | \ - (opc & 0xff))); \ 
-} while (0) - -#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff))) - -#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff))) - -#define S390_RIE(c,opc,g1,g3,m2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ - s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \ -} while (0) - -#define S390_RIL_1(c,opc,g1,m2) do \ -{ \ - s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \ - s390_emit32(c, m2); \ -} while (0) - -#define S390_RIL_2(c,opc,k1,m2) do \ -{ \ - s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \ - s390_emit32(c, m2); \ -} while (0) - -#define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff))); - -#define S390_SIY(c,opc,s1,p1,m2) do \ -{ \ - s390_emit16(c, ((opc & 0xff00) | m2)); \ - s390_emit32(c, ((s1) << 24 | (((p2) & 0xfffff) << 8) | \ - (opc & 0xff))); \ -} while (0) - -#define S390_S(c,opc,s2,p2) s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff))) - -#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \ -{ \ - s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \ - (s1) << 12 | ((p1) & 0xfff))); \ - s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ -} while (0) - -#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do \ -{ \ - s390_emit32(c, (opc << 24 | (n1) << 16 | (n2) << 12 | \ - (s1) << 12 | ((p1) & 0xfff))); \ - s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ -} while (0) - -#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do \ -{ \ - s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \ - (s1) << 12 | ((p1) & 0xfff))); \ - s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ -} while (0) - -#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do \ -{ \ - s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \ - (s2) << 12 | ((p2) & 0xfff))); \ - s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff))); \ -} while (0) - -#define S390_SSE(c,opc,s1,p1,s2,p2) do \ -{ \ - s390_emit16(c, opc); \ - s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \ - s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ -} while (0) - -#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) -#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) -#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) -#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) -#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) -#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d) -#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) -#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) -#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) -#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) -#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) -#define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2) -#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) -#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) -#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) -#define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) -#define s390_break(c) S390_RR(c, 0, 0, 0) -#define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) -#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) -#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) -#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2) -#define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d) -#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) -#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) -#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) -#define s390_cl(c, 
r, x, b, d) S390_RX(c, 0x55, r, x, b, d) -#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) -#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) -#define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d) -#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) -#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) -#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3) -#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2) -#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2) -#define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2) -#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d) -#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) -#define s390_jc(c, m, d) s390_brc(c, m, d) -#define s390_j(c,d) s390_brc(c, S390_CC_UN, d) -#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) -#define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) -#define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) -#define s390_jh(c, d) s390_brc(c, S390_CC_GT, d) -#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d) -#define s390_jl(c, d) s390_brc(c, S390_CC_LT, d) -#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d) -#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d) -#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d) -#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d) -#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d) -#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d) -#define s390_jno(c, d) s390_brc(c, S390_CC_NO, d) -#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) -#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) -#define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d) -#define s390_jnc(c, d) s390_brc(c, S390_CC_NC, d) -#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) -#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d) -#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o) -#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2) -#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2) -#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) -#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d) -#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) -#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) -#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) -#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) -#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) -#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) -#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) -#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) -#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) -#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) -#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2) -#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) -#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) -#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) -#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0) -#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0) -#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) -#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) -#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) -#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) -#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) -#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) -#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) -#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) -#define 
s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) -#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) -#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) -#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) -#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) -#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) -#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) -#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) -#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d) -#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d) -#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2) -#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2) -#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) -#define s390_slb(c, r, x, b, d) S390_RXY(c, 0xe399, r, x, b, d) -#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) -#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) -#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) -#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) -#define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) -#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2) -#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2) -#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2) -#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d) -#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2) -#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d) -#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d) -#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d) -#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d) -#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d) -#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d) -#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d) -#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d) -#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d) -#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d) -#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d) -#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d) -#define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) -#define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) -#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) -#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) -#endif diff --git a/s390/tramp.c b/s390/tramp.c deleted file mode 100644 index 475a4bf..0000000 --- a/s390/tramp.c +++ /dev/null @@ -1,1154 +0,0 @@ -/*------------------------------------------------------------------*/ -/* */ -/* Name - tramp.c */ -/* */ -/* Function - Create trampolines to invoke arbitrary functions. */ -/* */ -/* Name - Neale Ferguson. 
 */
-/*                                                                  */
-/* Date     - October, 2002                                         */
-/*                                                                  */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------*/
-/*                 D e f i n e s                                    */
-/*------------------------------------------------------------------*/
-
-#define PROLOG_INS 24	/* Size of emitted prolog	*/
-#define CALL_INS   4	/* Size of emitted call	*/
-#define EPILOG_INS 18	/* Size of emitted epilog	*/
-
-#define DEBUG(x)
-
-/*========================= End of Defines =========================*/
-
-/*------------------------------------------------------------------*/
-/*                 I n c l u d e s                                  */
-/*------------------------------------------------------------------*/
-
-#ifdef NEED_MPROTECT
-# include <sys/mman.h>
-# include <limits.h>	/* for PAGESIZE */
-# ifndef PAGESIZE
-# define PAGESIZE 4096
-# endif
-#endif
-
-#include "config.h"
-#include <stdio.h>
-#include <glib.h>
-#include "s390-codegen.h"
-#include "mono/metadata/class.h"
-#include "mono/metadata/tabledefs.h"
-#include "mono/interpreter/interp.h"
-#include "mono/metadata/appdomain.h"
-#include "mono/metadata/marshal.h"
-
-/*========================= End of Includes ========================*/
-
-/*------------------------------------------------------------------*/
-/*                 T y p e d e f s                                  */
-/*------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------*/
-/* Structure used to accumulate size of stack, code, and locals     */
-/*------------------------------------------------------------------*/
-typedef struct {
-	guint stack_size,
-	      local_size,
-	      code_size,
-	      retStruct;
-} size_data;
-
-/*========================= End of Typedefs ========================*/
-
-/*------------------------------------------------------------------*/
-/*                                                                  */
-/* Name     - add_general                                           */
-/*                                                                  */
-/* Function - Determine code and stack size increments for a        */
-/*            parameter.                                            */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-static void inline
-add_general (guint *gr, size_data *sz, gboolean simple)
-{
-	if (simple) {
-		if (*gr >= GENERAL_REGS) {
-			sz->stack_size += sizeof(long);
-			sz->code_size  += 12;
-		} else {
-			sz->code_size += 8;
-		}
-	} else {
-		if (*gr >= GENERAL_REGS - 1) {
-			sz->stack_size += 8 + (sz->stack_size % 8);
-			sz->code_size  += 10;
-		} else {
-			sz->code_size += 8;
-		}
-		(*gr) ++;
-	}
-	(*gr) ++;
-}
-
-/*========================= End of Function ========================*/
-
-/*------------------------------------------------------------------*/
-/*                                                                  */
-/* Name     - calculate_sizes                                       */
-/*                                                                  */
-/* Function - Determine the amount of space required for code       */
-/*            and stack. In addition determine starting points      */
-/*            for stack-based parameters, and area for struct-      */
-/*            ures being returned on the stack.                     */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-static void inline
-calculate_sizes (MonoMethodSignature *sig, size_data *sz,
-		 gboolean string_ctor)
-{
-	guint i, fr, gr, size;
-	guint32 simpletype, align;
-
-	fr = 0;
-	gr = 2;
-	sz->retStruct  = 0;
-	sz->stack_size = S390_MINIMAL_STACK_SIZE;
-	sz->code_size  = (PROLOG_INS + CALL_INS + EPILOG_INS);
-	sz->local_size = 0;
-
-	if (sig->hasthis) {
-		add_general (&gr, sz, TRUE);
-	}
-
-	/*----------------------------------------------------------*/
-	/* We determine the size of the return code/stack in case we*/
-	/* need to reserve a register to be used to address a stack */
-	/* area that the callee will use.
*/ - /*----------------------------------------------------------*/ - - if (sig->ret->byref || string_ctor) { - sz->code_size += 8; - } else { - simpletype = sig->ret->type; -enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_R4: - case MONO_TYPE_R8: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_PTR: - case MONO_TYPE_STRING: - sz->code_size += 4; - break; - case MONO_TYPE_I8: - sz->code_size += 4; - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - gr++; - if (sig->pinvoke) - size = mono_class_native_size (sig->ret->data.klass, &align); - else - size = mono_class_value_size (sig->ret->data.klass, &align); - if (align > 1) - sz->code_size += 10; - switch (size) { - /*----------------------------------*/ - /* On S/390, structures of size 1, */ - /* 2, 4, and 8 bytes are returned */ - /* in (a) register(s). */ - /*----------------------------------*/ - case 1: - case 2: - case 4: - case 8: - sz->code_size += 16; - sz->stack_size += 4; - break; - default: - sz->retStruct = 1; - sz->code_size += 32; - } - break; - case MONO_TYPE_VOID: - break; - default: - g_error ("tramp: cannot handle as return value 0x%x", sig->ret->type); - } - } - - /*----------------------------------------------------------*/ - /* We determine the size of the parameter code and stack */ - /* requirements by checking the types and sizes of the */ - /* parameters. */ - /*----------------------------------------------------------*/ - - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) { - add_general (&gr, sz, TRUE); - continue; - } - simpletype = sig->params [i]->type; - enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - add_general (&gr, sz, TRUE); - break; - case MONO_TYPE_SZARRAY: - add_general (&gr, sz, TRUE); - break; - case MONO_TYPE_VALUETYPE: - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - if (sig->pinvoke) - size = mono_class_native_size (sig->params [i]->data.klass, &align); - else - size = mono_class_value_size (sig->params [i]->data.klass, &align); - DEBUG(printf("%d typesize: %d (%d)\n",i,size,align)); - switch (size) { - /*----------------------------------*/ - /* On S/390, structures of size 1, */ - /* 2, 4, and 8 bytes are passed in */ - /* (a) register(s). 
*/ - /*----------------------------------*/ - case 0: - case 1: - case 2: - case 4: - add_general(&gr, sz, TRUE); - break; - case 8: - add_general(&gr, sz, FALSE); - break; - default: - sz->local_size += (size + (size % align)); - sz->code_size += 40; - } - break; - case MONO_TYPE_I8: - add_general (&gr, sz, FALSE); - break; - case MONO_TYPE_R4: - if (fr < FLOAT_REGS) { - sz->code_size += 4; - fr++; - } - else { - sz->code_size += 4; - sz->stack_size += 8; - } - break; - case MONO_TYPE_R8: - if (fr < FLOAT_REGS) { - sz->code_size += 4; - fr++; - } else { - sz->code_size += 4; - sz->stack_size += 8 + (sz->stack_size % 8); - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - - /* align stack size to 8 */ - DEBUG (printf (" stack size: %d (%d)\n" - " code size: %d\n" - " local size: %d\n", - (sz->stack_size + 8) & ~8, sz->stack_size, - (sz->code_size),(sz->local_size + 8) & ~8)); - sz->stack_size = (sz->stack_size + 8) & ~8; - sz->local_size = (sz->local_size + 8) & ~8; -} - -/*========================= End of Function ========================*/ - -/*------------------------------------------------------------------*/ -/* */ -/* Name - emit_prolog */ -/* */ -/* Function - Create the instructions that implement the stand- */ -/* ard function prolog according to the S/390 ABI. */ -/* */ -/*------------------------------------------------------------------*/ - -static inline guint8 * -emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz) -{ - guint stack_size; - - stack_size = sz->stack_size + sz->local_size; - - /* function prolog */ - s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24); - s390_l (p, s390_r7, 0, STK_BASE, MINV_POS); - s390_lr (p, s390_r11, STK_BASE); - s390_ahi (p, STK_BASE, -stack_size); - s390_st (p, s390_r11, 0, STK_BASE, 0); - - /*-----------------------------------------*/ - /* Save: */ - /* - address of "callme" */ - /* - address of "retval" */ - /* - address of "arguments" */ - /*-----------------------------------------*/ - s390_lr (p, s390_r9, s390_r2); - s390_lr (p, s390_r8, s390_r3); - s390_lr (p, s390_r10, s390_r5); - - return p; -} - -/*========================= End of Function ========================*/ - -/*------------------------------------------------------------------*/ -/* */ -/* Name - emit_save_parameters */ -/* */ -/* Function - Create the instructions that load registers with */ -/* parameters, place others on the stack according */ -/* to the S/390 ABI. */ -/* */ -/* The resulting function takes the form: */ -/* void func (void (*callme)(), void *retval, */ -/* void *this_obj, stackval *arguments); */ -/* */ -/*------------------------------------------------------------------*/ - -inline static guint8* -emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz) -{ - guint i, fr, gr, act_strs, align, - stack_par_pos, size, local_pos; - guint32 simpletype; - - /*----------------------------------------------------------*/ - /* If a structure on stack is being returned, reserve r2 */ - /* to point to an area where it can be passed. 
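   */
-	/* (The S/390 ABI passes the address of a returned          */
-	/* structure as a hidden first argument, so the real        */
-	/* arguments then begin in r3 rather than r2.)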
*/ - /*----------------------------------------------------------*/ - if (sz->retStruct) - gr = 1; - else - gr = 0; - fr = 0; - act_strs = 0; - stack_par_pos = S390_MINIMAL_STACK_SIZE; - local_pos = sz->stack_size; - - if (sig->hasthis) { - s390_lr (p, s390_r2 + gr, s390_r4); - gr++; - } - - act_strs = 0; - for (i = 0; i < sig->param_count; ++i) { - DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref)); - if (sig->params [i]->byref) { - if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); - gr ++; - } else { - s390_l (p, s390_r0, 0, ARG_BASE, STKARG); - s390_st (p, s390_r0, 0, STK_BASE, stack_par_pos); - stack_par_pos += sizeof(long); - } - continue; - } - simpletype = sig->params [i]->type; - enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); - gr ++; - } else { - s390_l (p, s390_r0, 0, ARG_BASE, STKARG); - s390_st (p, s390_r0, 0, STK_BASE, stack_par_pos); - stack_par_pos += sizeof(long); - } - break; - case MONO_TYPE_VALUETYPE: - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - if (sig->pinvoke) - size = mono_class_native_size (sig->params [i]->data.klass, &align); - else - size = mono_class_value_size (sig->params [i]->data.klass, &align); - DEBUG(printf("parStruct - size %d pinvoke: %d\n",size,sig->pinvoke)); - switch (size) { - case 0: - case 1: - case 2: - case 4: - if (gr < GENERAL_REGS) { - s390_l (p, s390_r2 + gr, 0,ARG_BASE, STKARG); - s390_l (p, s390_r2 + gr, 0, s390_r2 + gr, 0); - gr++; - } else { - stack_par_pos += (stack_par_pos % align); - s390_l (p, s390_r10, 0,ARG_BASE, STKARG); - s390_l (p, s390_r10, 0, s390_r10, 0); - s390_st (p, s390_r10, 0, STK_BASE, stack_par_pos); - stack_par_pos += sizeof(long); - } - break; - case 8: - if (gr < GENERAL_REGS-1) { - s390_l (p, s390_r2 + gr, 0, ARG_BASE, STKARG); - s390_lm (p, s390_r2 + gr, s390_r3 + gr, s390_r2 + gr, 0); - } else { - stack_par_pos += (stack_par_pos % align); - s390_l (p, s390_r10, 0, ARG_BASE, STKARG); - s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0); - stack_par_pos += sizeof(long long); - } - break; - default: - if (size <= 256) { - local_pos += (local_pos % align); - s390_l (p, s390_r13, 0, ARG_BASE, STKARG); - s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0); - s390_la (p, s390_r13, 0, STK_BASE, local_pos); - local_pos += size; - } else { - local_pos += (local_pos % align); - s390_bras (p, s390_r13, 4); - s390_word (p, size); - s390_l (p, s390_r1, 0, s390_r13, 0); - s390_l (p, s390_r0, 0, ARG_BASE, STKARG); - s390_lr (p, s390_r14, s390_r12); - s390_la (p, s390_r12, 0, STK_BASE, local_pos); - s390_lr (p, s390_r13, s390_r1); - s390_mvcl (p, s390_r12, s390_r0); - s390_lr (p, s390_r12, s390_r14); - s390_la (p, s390_r13, 0, STK_BASE, local_pos); - local_pos += size; - } - if (gr < GENERAL_REGS) { - s390_lr (p, s390_r2 + gr, s390_r13); - gr++; - } else { - s390_st (p, s390_r13, 0, STK_BASE, stack_par_pos); - stack_par_pos += sizeof(long); - } - } - break; - case MONO_TYPE_I8: - if (gr < GENERAL_REGS-1) { - s390_lm (p, s390_r2 + gr, s390_r2 + gr + 1, 
ARG_BASE, STKARG); - gr += 2; - } else { - *(guint32 *) p += 7; - *(guint32 *) p &= ~7; - s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, ARG_BASE, STKARG); - stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); - } - break; - case MONO_TYPE_R4: - if (fr < FLOAT_REGS) { - s390_le (p, s390_r0 + fr, 0, ARG_BASE, STKARG); - fr++; - } else { - s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG); - stack_par_pos += sizeof(float); - } - break; - case MONO_TYPE_R8: - if (fr < FLOAT_REGS) { - s390_ld (p, s390_r0 + fr, 0, ARG_BASE, STKARG); - fr++; - } else { - *(guint32 *) p += 7; - *(guint32 *) p &= ~7; - s390_mvc (p, sizeof(double), STK_BASE, stack_par_pos, ARG_BASE, STKARG); - stack_par_pos += sizeof(long long) + (stack_par_pos % sizeof(long long)); - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - /*----------------------------------------------------------*/ - /* If we're returning a structure but not in a register */ - /* then point the result area for the called routine */ - /*----------------------------------------------------------*/ - if (sz->retStruct) { - s390_l (p, s390_r2, 0, s390_r8, 0); - } - - return p; -} - -/*========================= End of Function ========================*/ - -/*------------------------------------------------------------------*/ -/* */ -/* Name - alloc_code_memory */ -/* */ -/* Function - Allocate space to place the emitted code. */ -/* */ -/*------------------------------------------------------------------*/ - -static inline guint8 * -alloc_code_memory (guint code_size) -{ - guint8 *p; - -#ifdef NEED_MPROTECT - p = g_malloc (code_size + PAGESIZE - 1); - - /* Align to a multiple of PAGESIZE, assumed to be a power of two */ - p = (char *)(((int) p + PAGESIZE-1) & ~(PAGESIZE-1)); -#else - p = g_malloc (code_size); -#endif - DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4)); - - return p; -} - -/*========================= End of Function ========================*/ - -/*------------------------------------------------------------------*/ -/* */ -/* Name - emit_call_and_store_retval */ -/* */ -/* Function - Emit code that will implement the call to the */ -/* desired function, and unload the result according */ -/* to the S390 ABI for the type of value returned */ -/* */ -/*------------------------------------------------------------------*/ - -static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, - size_data *sz, gboolean string_ctor) -{ - guint32 simpletype; - guint retSize, align; - - /* call "callme" */ - s390_basr (p, s390_r14, s390_r9); - - /* get return value */ - if (sig->ret->byref || string_ctor) { - s390_st (p, s390_r2, 0, s390_r8, 0); - } else { - simpletype = sig->ret->type; -enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - s390_stc (p, s390_r2, 0, s390_r8, 0); - break; - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - s390_sth (p, s390_r2, 0, s390_r8, 0); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - s390_st (p, s390_r2, 0, s390_r8, 0); - break; - case MONO_TYPE_R4: - s390_ste (p, s390_f0, 0, s390_r8, 0); - break; - case MONO_TYPE_R8: - s390_std (p, s390_f0, 0, s390_r8, 0); - break; - case MONO_TYPE_I8: - s390_stm (p, s390_r2, s390_r3, s390_r8, 0); - break; - case 
MONO_TYPE_VALUETYPE:
-			if (sig->ret->data.klass->enumtype) {
-				simpletype = sig->ret->data.klass->enum_basetype->type;
-				goto enum_retvalue;
-			}
-			if (sig->pinvoke)
-				retSize = mono_class_native_size (sig->ret->data.klass, &align);
-			else
-				retSize = mono_class_value_size (sig->ret->data.klass, &align);
-			DEBUG(printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke));
-			switch(retSize) {
-			case 0:
-				break;
-			case 1:
-				s390_stc (p, s390_r2, 0, s390_r8, 0);
-				break;
-			case 2:
-				s390_sth (p, s390_r2, 0, s390_r8, 0);
-				break;
-			case 4:
-				s390_st (p, s390_r2, 0, s390_r8, 0);
-				break;
-			case 8:
-				s390_stm (p, s390_r2, s390_r3, s390_r8, 0);
-				break;
-			default: ;
-				/*------------------------------------------*/
-				/* The callee has already placed the result */
-				/* in the required area                     */
-				/*------------------------------------------*/
-				break;
-			}
-			break;
-		case MONO_TYPE_VOID:
-			break;
-		default:
-			g_error ("Can't handle as return value 0x%x",
-				 sig->ret->type);
-		}
-	}
-
-	return p;
-}
-
-/*========================= End of Function ========================*/
-
-/*------------------------------------------------------------------*/
-/*                                                                  */
-/* Name     - emit_epilog                                           */
-/*                                                                  */
-/* Function - Create the instructions that implement the stand-     */
-/*            ard function epilog according to the S/390 ABI.       */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-static inline guint8 *
-emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
-{
-	/* function epilog */
-	s390_l  (p, STK_BASE, 0, STK_BASE, 0);
-	s390_l  (p, s390_r4, 0, STK_BASE, 56);
-	s390_lm (p, s390_r6, STK_BASE, STK_BASE, 24);
-	s390_br (p, s390_r4);
-
-	return p;
-}
-
-/*========================= End of Function ========================*/
-
-/*------------------------------------------------------------------*/
-/*                                                                  */
-/* Name     - mono_arch_create_trampoline.                          */
-/*                                                                  */
-/* Function - Create the code that will allow a mono method to      */
-/*            invoke a system subroutine.                           */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-MonoPIFunc
-mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
-{
-	guint8 *p, *code_buffer;
-	size_data sz;
-
-	DEBUG (printf ("\nPInvoke [start emitting]\n"));
-	calculate_sizes (sig, &sz, string_ctor);
-
-	p = code_buffer = alloc_code_memory (sz.code_size);
-	p = emit_prolog (p, sig, &sz);
-	p = emit_save_parameters (p, sig, &sz);
-	p = emit_call_and_store_retval (p, sig, &sz, string_ctor);
-	p = emit_epilog (p, sig, &sz);
-
-#ifdef NEED_MPROTECT
-	if (mprotect (code_buffer, 1024, PROT_READ | PROT_WRITE | PROT_EXEC)) {
-		g_error ("Cannot mprotect trampoline\n");
-	}
-#endif
-
-	DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
-
-	DEBUG (printf ("PInvoke [end emitting]\n"));
-
-	return (MonoPIFunc) code_buffer;
-}
-
-/*========================= End of Function ========================*/
-
-/*------------------------------------------------------------------*/
-/*                                                                  */
-/* Name     - mono_arch_create_method_pointer                       */
-/*                                                                  */
-/* Function - Returns a pointer to a native function that can       */
-/*            be used to call the specified method.                 */
-/*                                                                  */
-/*            The function created will receive the arguments       */
-/*            according to the calling convention specified in      */
-/*            the method.                                           */
-/*                                                                  */
-/*            This function works by creating a MonoInvocation      */
-/*            structure, filling the fields in and calling          */
-/*            ves_exec_method() on it.
 */
-/*                                                                  */
-/* Logic:                                                           */
-/* ------                                                           */
-/* mono_arch_create_method_pointer (MonoMethod *method)             */
-/*    create the unmanaged->managed wrapper                         */
-/*    register it with mono_jit_info_table_add()                    */
-/*                                                                  */
-/* What does the unmanaged->managed wrapper do?                     */
-/*    allocate a MonoInvocation structure (inv) on the stack        */
-/*    allocate an array of stackval on the stack with length =      */
-/*       method->signature->param_count + 1 [call it stack_args]    */
-/*    set inv->ex, inv->ex_handler, inv->parent to NULL             */
-/*    set inv->method to method                                     */
-/*    if method is an instance method, set inv->obj to the          */
-/*       'this' argument (the first argument) else set to NULL      */
-/*    for each argument to the method call:                         */
-/*       stackval_from_data (sig->params[i], &stack_args[i],        */
-/*                           arg, sig->pinvoke);                    */
-/*    Where:                                                        */
-/*    ------                                                        */
-/*    sig            - is method->signature                         */
-/*    &stack_args[i] - is the pointer to the ith element            */
-/*                     in the stackval array                        */
-/*    arg            - is a pointer to the argument re-             */
-/*                     ceived by the function according             */
-/*                     to the call convention. If it                */
-/*                     gets passed in a register, save              */
-/*                     on the stack first.                          */
-/*                                                                  */
-/*    set inv->retval to the address of the last element of         */
-/*       stack_args [recall we allocated param_count+1 of them]     */
-/*    call ves_exec_method(inv)                                     */
-/*    copy the returned value from inv->retval where the calling    */
-/*       convention expects to find it on return from the wrap-     */
-/*       per [if it's a structure, use stackval_to_data]            */
-/*                                                                  */
-/*------------------------------------------------------------------*/
-
-void *
-mono_arch_create_method_pointer (MonoMethod *method)
-{
-	MonoMethodSignature *sig;
-	MonoJitInfo *ji;
-	guint8 *p, *code_buffer;
-	guint i, align = 0, simple_type, retSize, reg_save = 0,
-	      stackval_arg_pos, local_pos, float_pos,
-	      local_start, reg_param = 0, stack_param,
-	      this_flag, arg_pos, fpr_param, parSize;
-	guint32 simpletype;
-	size_data sz;
-	int *vtbuf, cpos, vt_cur;
-
-	sz.code_size  = 1024;
-	sz.stack_size = 1024;
-	stack_param = 0;
-	fpr_param   = 0;
-	arg_pos     = 0;
-
-	sig = method->signature;
-
-	p = code_buffer = g_malloc (sz.code_size);
-
-	DEBUG (printf ("\nDelegate [start emitting] %s at 0x%08x\n",
-		       method->name,p));
-
-	/*----------------------------------------------------------*/
-	/* prolog                                                    */
-	/*----------------------------------------------------------*/
-	s390_stm (p, s390_r6, STK_BASE, STK_BASE, 24);
-	s390_l   (p, s390_r7, 0, STK_BASE, MINV_POS);
-	s390_lr  (p, s390_r0, STK_BASE);
-	s390_ahi (p, STK_BASE, -(sz.stack_size+MINV_POS));
-	s390_st  (p, s390_r0, 0, STK_BASE, 0);
-	s390_la  (p, s390_r8, 0, STK_BASE, 4);
-	s390_lr  (p, s390_r10, s390_r8);
-	s390_lhi (p, s390_r9, sz.stack_size+92);
-	s390_lhi (p, s390_r11, 0);
-	s390_mvcl(p, s390_r8, s390_r10);
-
-	/*----------------------------------------------------------*/
-	/* Let's fill MonoInvocation - first zero some fields        */
-	/*----------------------------------------------------------*/
-	s390_lhi (p, s390_r0, 0);
-	s390_st  (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
-	s390_st  (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
-	s390_st  (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
-	s390_lhi (p, s390_r0, 1);
-	s390_st  (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap)));
-
-	/*----------------------------------------------------------*/
-	/* set method pointer                                        */
-	/*----------------------------------------------------------*/
-	s390_bras (p, s390_r13, 4);
-	s390_word (p, method);
-	s390_l    (p, s390_r0, 0, s390_r13, 0);
-	s390_st   (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
-
-	local_start = local_pos = MINV_POS +
-		sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval);
-	this_flag   = (sig->hasthis ? 1 : 0);
-
-	/*----------------------------------------------------------*/
-	/* if we are returning a structure, check its length to see */
-	/* if there's a "hidden" parameter that points to the area. */
-	/* If necessary save this hidden parameter for later        */
-	/*----------------------------------------------------------*/
-	if (MONO_TYPE_ISSTRUCT(sig->ret)) {
-		if (sig->pinvoke)
-			retSize = mono_class_native_size (sig->ret->data.klass, &align);
-		else
-			retSize = mono_class_value_size (sig->ret->data.klass, &align);
-		switch(retSize) {
-		case 0:
-		case 1:
-		case 2:
-		case 4:
-		case 8:
-			sz.retStruct = 0;
-			break;
-		default:
-			sz.retStruct = 1;
-			s390_lr(p, s390_r8, s390_r2);
-			reg_save = 1;
-		}
-	} else {
-		reg_save = 0;
-	}
-
-	if (this_flag) {
-		s390_st (p, s390_r2 + reg_save, 0, STK_BASE,
-			 (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
-		reg_param++;
-	} else {
-		s390_st (p, s390_r2 + reg_save, 0, STK_BASE, local_pos);
-		local_pos += sizeof(int);
-		s390_st (p, s390_r0, 0, STK_BASE,
-			 (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
-	}
-
-	s390_stm (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos);
-	local_pos += 4 * sizeof(long);
-	float_pos  = local_pos;
-	s390_std (p, s390_f0, 0, STK_BASE, local_pos);
-	local_pos += sizeof(double);
-	s390_std (p, s390_f2, 0, STK_BASE, local_pos);
-	local_pos += sizeof(double);
-
-	/*----------------------------------------------------------*/
-	/* prepare space for valuetypes                              */
-	/*----------------------------------------------------------*/
-	vt_cur = local_pos;
-	vtbuf  = alloca (sizeof(int)*sig->param_count);
-	cpos   = 0;
-	for (i = 0; i < sig->param_count; i++) {
-		MonoType *type = sig->params [i];
-		vtbuf [i] = -1;
-		DEBUG(printf("par: %d type: %d ref: %d\n",i,type->type,type->byref));
-		if (type->type == MONO_TYPE_VALUETYPE) {
-			MonoClass *klass = type->data.klass;
-			gint size;
-
-			if (klass->enumtype)
-				continue;
-			size = mono_class_native_size (klass, &align);
-			cpos += align - 1;
-			cpos &= ~(align - 1);
-			vtbuf [i] = cpos;
-			cpos += size;
-		}
-	}
-	cpos += 3;
-	cpos &= ~3;
-
-	local_pos += cpos;
-
-	/*----------------------------------------------------------*/
-	/* set MonoInvocation::stack_args                            */
-	/*----------------------------------------------------------*/
-	stackval_arg_pos = MINV_POS + sizeof (MonoInvocation);
-	s390_la (p, s390_r0, 0, STK_BASE, stackval_arg_pos);
-	s390_st (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)));
-
-	/*----------------------------------------------------------*/
-	/* add stackval arguments                                    */
-	/*----------------------------------------------------------*/
-	for (i = 0; i < sig->param_count; ++i) {
-		if (sig->params [i]->byref) {
-			ADD_ISTACK_PARM(0, 1);
-		} else {
-			simple_type = sig->params [i]->type;
-		enum_savechk:
-			switch (simple_type) {
-			case MONO_TYPE_I8:
-				ADD_ISTACK_PARM(-1, 2);
-				break;
-			case MONO_TYPE_R4:
-				ADD_RSTACK_PARM(1);
-				break;
-			case MONO_TYPE_R8:
-				ADD_RSTACK_PARM(2);
-				break;
-			case MONO_TYPE_VALUETYPE:
-				if (sig->params [i]->data.klass->enumtype) {
-					simple_type = sig->params [i]->data.klass->enum_basetype->type;
-					goto enum_savechk;
-				}
-				if (sig->pinvoke)
-					parSize = mono_class_native_size (sig->params [i]->data.klass, 
&align); - else - parSize = mono_class_value_size (sig->params [i]->data.klass, &align); - switch(parSize) { - case 0: - case 1: - case 2: - case 4: - ADD_PSTACK_PARM(0, 1); - break; - case 8: - ADD_PSTACK_PARM(-1, 2); - break; - default: - ADD_TSTACK_PARM; - } - break; - default: - ADD_ISTACK_PARM(0, 1); - } - } - - if (vtbuf [i] >= 0) { - s390_la (p, s390_r3, 0, STK_BASE, vt_cur); - s390_st (p, s390_r3, 0, STK_BASE, stackval_arg_pos); - s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); - vt_cur += vtbuf [i]; - } else { - s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos); - } - - /*--------------------------------------*/ - /* Load the parameter registers for the */ - /* call to stackval_from_data */ - /*--------------------------------------*/ - s390_bras (p, s390_r13, 8); - s390_word (p, sig->params [i]); - s390_word (p, sig->pinvoke); - s390_word (p, stackval_from_data); - s390_l (p, s390_r2, 0, s390_r13, 0); - - s390_l (p, s390_r5, 0, s390_r13, 4); - - s390_l (p, s390_r1, 0, s390_r13, 8); - s390_basr (p, s390_r14, s390_r1); - - stackval_arg_pos += sizeof(stackval); - - /* fixme: alignment */ - DEBUG (printf ("arg_pos %d --> ", arg_pos)); - if (sig->pinvoke) - arg_pos += mono_type_native_stack_size (sig->params [i], &align); - else - arg_pos += mono_type_stack_size (sig->params [i], &align); - - DEBUG (printf ("%d\n", stackval_arg_pos)); - } - - /*----------------------------------------------------------*/ - /* Set return area pointer. */ - /*----------------------------------------------------------*/ - s390_la (p, s390_r10, 0, STK_BASE, stackval_arg_pos); - s390_st (p, s390_r10, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval))); - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - s390_la (p, s390_r9, 0, s390_r10, sizeof(stackval)); - s390_st (p, s390_r9, 0,STK_BASE, stackval_arg_pos); - stackval_arg_pos += sizeof(stackval); - } - } - - /*----------------------------------------------------------*/ - /* call ves_exec_method */ - /*----------------------------------------------------------*/ - s390_bras (p, s390_r13, 4); - s390_word (p, ves_exec_method); - s390_l (p, s390_r1, 0, s390_r13, 0); - s390_la (p, s390_r2, 0, STK_BASE, MINV_POS); - s390_basr (p, s390_r14, s390_r1); - - /*----------------------------------------------------------*/ - /* move retval from stackval to proper place (r3/r4/...) 
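   */
-	/* (i.e. r2 for int-sized values, r2/r3 for I8, f0 for     */
-	/* R4/R8; structures are copied out by stackval_to_data)   */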
-	/*----------------------------------------------------------*/
-	DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref));
-	if (sig->ret->byref) {
-		DEBUG (printf ("ret by ref\n"));
-		s390_st (p, s390_r2, 0, s390_r10, 0);
-	} else {
-	enum_retvalue:
-		DEBUG(printf("Returns: %d\n",sig->ret->type));
-		switch (sig->ret->type) {
-		case MONO_TYPE_VOID:
-			break;
-		case MONO_TYPE_BOOLEAN:
-		case MONO_TYPE_U1:
-			s390_lhi (p, s390_r2, 0);
-			s390_ic  (p, s390_r2, 0, s390_r10, 0);
-			break;
-		case MONO_TYPE_I2:
-		case MONO_TYPE_U2:
-			s390_lh (p, s390_r2, 0, s390_r10, 0);
-			break;
-		case MONO_TYPE_I4:
-		case MONO_TYPE_U4:
-		case MONO_TYPE_I:
-		case MONO_TYPE_U:
-		case MONO_TYPE_OBJECT:
-		case MONO_TYPE_STRING:
-		case MONO_TYPE_CLASS:
-			s390_l (p, s390_r2, 0, s390_r10, 0);
-			break;
-		case MONO_TYPE_I8:
-			s390_lm (p, s390_r2, s390_r3, s390_r10, 0);
-			break;
-		case MONO_TYPE_R4:
-			s390_le (p, s390_f0, 0, s390_r10, 0);
-			break;
-		case MONO_TYPE_R8:
-			s390_ld (p, s390_f0, 0, s390_r10, 0);
-			break;
-		case MONO_TYPE_VALUETYPE:
-			if (sig->ret->data.klass->enumtype) {
-				simpletype = sig->ret->data.klass->enum_basetype->type;
-				goto enum_retvalue;
-			}
-			/*---------------------------------*/
-			/* Call stackval_to_data to return */
-			/* the structure                   */
-			/*---------------------------------*/
-			s390_bras (p, s390_r13, 8);
-			s390_word (p, sig->ret);
-			s390_word (p, sig->pinvoke);
-			s390_word (p, stackval_to_data);
-			s390_l (p, s390_r2, 0, s390_r13, 0);
-			s390_l (p, s390_r3, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
-			if (sz.retStruct) {
-				/*------------------------------------------*/
-				/* Get stackval_to_data to set result area   */
-				/*------------------------------------------*/
-				s390_lr (p, s390_r4, s390_r8);
-			} else {
-				/*------------------------------------------*/
-				/* Give stackval_to_data a temp result area  */
-				/*------------------------------------------*/
-				s390_la (p, s390_r4, 0, STK_BASE, stackval_arg_pos);
-			}
-			s390_l (p, s390_r5, 0, s390_r13, 4);
-			s390_l (p, s390_r1, 0, s390_r13, 8);
-			s390_basr (p, s390_r14, s390_r1);
-			switch (retSize) {
-			case 0:
-				break;
-			case 1:
-				s390_lhi (p, s390_r2, 0);
-				s390_ic  (p, s390_r2, 0, s390_r10, 0);
-				break;
-			case 2:
-				s390_lh (p, s390_r2, 0, s390_r10, 0);
-				break;
-			case 4:
-				s390_l (p, s390_r2, 0, s390_r10, 0);
-				break;
-			case 8:
-				s390_lm (p, s390_r2, s390_r3, s390_r10, 0);
-				break;
-			default: ;
-				/*-------------------------------------------------*/
-				/* stackval_to_data has placed data in result area  */
-				/*-------------------------------------------------*/
-				break;
-			}
-			break;
-		default:
-			g_error ("Type 0x%x not handled yet in thunk creation",
-				 sig->ret->type);
-			break;
-		}
-	}
-
-	/*----------------------------------------------------------*/
-	/* epilog                                                    */
-	/*----------------------------------------------------------*/
-	s390_l  (p, STK_BASE, 0, STK_BASE, 0);
-	s390_l  (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET);
-	s390_lm (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
-	s390_br (p, s390_r4);
-
-	DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
-
-	DEBUG (printf ("Delegate [end emitting]\n"));
-
-	ji = g_new0 (MonoJitInfo, 1);
-	ji->method     = method;
-	ji->code_size  = p - code_buffer;
-	ji->code_start = code_buffer;
-
-	mono_jit_info_table_add (mono_get_root_domain (), ji);
-
-	return ji->code_start;
-}
-
-/*========================= End of Function ========================*/
-- 
cgit v1.1

From a42bc8f14a3393150fb6fbb772c2b0259267f5ae Mon Sep 17 00:00:00 2001
From: Neale Ferguson
Date: 
Thu, 25 Apr 2013 10:01:14 -0400 Subject: Add lazy rgctx support to s390x --- s390x/s390x-codegen.h | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index d3292bf..3677b2d 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -593,11 +593,14 @@ typedef struct { #define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) #define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2) #define s390_bctrg(c, r1, r2) S390_RRE(c, 0xb946, r1, r2) +#define s390_bnzr(c, r) S390_RR(c, 0x07, 0x07, r) #define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o) #define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o) #define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d) +#define s390_brcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) #define s390_br(c, r) S390_RR(c, 0x07, 0xf, r) #define s390_break(c) S390_RR(c, 0, 0, 0) +#define s390_bzr(c, r) S390_RR(c, 0x07, 0x08, r) #define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d) #define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d) #define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2) @@ -636,7 +639,7 @@ typedef struct { #define s390_icy(c, r, x, b, d) S390_RXY(c, 0xe373, r, x, b, d) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) #define s390_jc(c, m, d) s390_brc(c, m, d) -#define s390_jcl(c, m, d) S390_RIL_2(c, 0xc04, m, d) +#define s390_jcl(c, m, d) s390_brcl(c, m, d) #define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d) #define s390_je(c, d) s390_brc(c, S390_CC_EQ, d) #define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d) @@ -654,6 +657,24 @@ typedef struct { #define s390_jno(c, d) s390_brc(c, S390_CC_NO, d) #define s390_jp(c, d) s390_brc(c, S390_CC_GT, d) #define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d) +#define s390_jg(c,d) s390_brcl(c, S390_CC_UN, d) +#define s390_jgcy(c, d) s390_brcl(c, S390_CC_CY, d) +#define s390_jge(c, d) s390_brcl(c, S390_CC_EQ, d) +#define s390_jgeo(c, d) s390_brcl(c, S390_CC_ZR|S390_CC_OV, d) +#define s390_jgh(c, d) s390_brcl(c, S390_CC_GT, d) +#define s390_jgho(c, d) s390_brcl(c, S390_CC_GT|S390_CC_OV, d) +#define s390_jgl(c, d) s390_brcl(c, S390_CC_LT, d) +#define s390_jglo(c, d) s390_brcl(c, S390_CC_LT|S390_CC_OV, d) +#define s390_jgm(c, d) s390_brcl(c, S390_CC_LT, d) +#define s390_jgnc(c, d) s390_brcl(c, S390_CC_NC, d) +#define s390_jgne(c, d) s390_brcl(c, S390_CC_NZ, d) +#define s390_jgnh(c, d) s390_brcl(c, S390_CC_LE, d) +#define s390_jgnl(c, d) s390_brcl(c, S390_CC_GE, d) +#define s390_jgnz(c, d) s390_brcl(c, S390_CC_NZ, d) +#define s390_jgo(c, d) s390_brcl(c, S390_CC_OV, d) +#define s390_jgno(c, d) s390_brcl(c, S390_CC_NO, d) +#define s390_jgp(c, d) s390_brcl(c, S390_CC_GT, d) +#define s390_jgz(c, d) s390_brcl(c, S390_CC_ZR, d) #define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d) #define s390_ly(c, r, x, b, d) S390_RXY(c, 0xe358, r, x, b, d) #define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d) -- cgit v1.1 From 78c1e65942210449d0d1c4957b42242ebc9bdb5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 14 May 2013 03:10:43 +0200 Subject: Kill support for the ancient FPA format on ARM. 
--- arm/Makefile.am | 7 +- arm/arm-fpa-codegen.h | 198 -------------------------------------------------- arm/arm-vfp-codegen.h | 2 +- arm/fpa_macros.th | 15 ---- arm/fpam_macros.th | 14 ---- arm/fpaops.sh | 24 ------ 6 files changed, 2 insertions(+), 258 deletions(-) delete mode 100644 arm/arm-fpa-codegen.h delete mode 100644 arm/fpa_macros.th delete mode 100644 arm/fpam_macros.th delete mode 100755 arm/fpaops.sh diff --git a/arm/Makefile.am b/arm/Makefile.am index 86784c0..593574c 100644 --- a/arm/Makefile.am +++ b/arm/Makefile.am @@ -3,7 +3,7 @@ AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-arm.la -BUILT_SOURCES = arm_dpimacros.h arm_fpamacros.h arm_vfpmacros.h +BUILT_SOURCES = arm_dpimacros.h arm_vfpmacros.h libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \ @@ -16,10 +16,6 @@ arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th (cd $(srcdir); bash ./dpiops.sh) > $@t mv $@t $@ -arm_fpamacros.h: fpaops.sh fpam_macros.th fpa_macros.th - (cd $(srcdir); bash ./fpaops.sh) > $@t - mv $@t $@ - arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th (cd $(srcdir); bash ./vfpops.sh) > $@t mv $@t $@ @@ -27,6 +23,5 @@ arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th CLEANFILES = $(BUILT_SOURCES) EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \ - fpam_macros.th fpa_macros.th arm-fpa-codegen.h fpaops.sh \ vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh diff --git a/arm/arm-fpa-codegen.h b/arm/arm-fpa-codegen.h deleted file mode 100644 index 4389a5e..0000000 --- a/arm/arm-fpa-codegen.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2005 Novell Inc - * Copyright 2011 Xamarin Inc - */ - -#ifndef __MONO_ARM_FPA_CODEGEN_H__ -#define __MONO_ARM_FPA_CODEGEN_H__ - -#include "arm-codegen.h" - -enum { - /* FPA registers */ - ARM_FPA_F0, - ARM_FPA_F1, - ARM_FPA_F2, - ARM_FPA_F3, - ARM_FPA_F4, - ARM_FPA_F5, - ARM_FPA_F6, - ARM_FPA_F7, - - /* transfer length for LDF/STF (T0/T1), already shifted */ - ARM_FPA_SINGLE = 0, - ARM_FPA_DOUBLE = 1 << 15, - - ARM_FPA_ADF = 0 << 20, - ARM_FPA_MUF = 1 << 20, - ARM_FPA_SUF = 2 << 20, - ARM_FPA_RSF = 3 << 20, - ARM_FPA_DVF = 4 << 20, - ARM_FPA_RDF = 5 << 20, - ARM_FPA_POW = 6 << 20, - ARM_FPA_RPW = 7 << 20, - ARM_FPA_RMF = 8 << 20, - ARM_FPA_FML = 9 << 20, - ARM_FPA_FDV = 10 << 20, - ARM_FPA_FRD = 11 << 20, - ARM_FPA_POL = 12 << 20, - - /* monadic */ - ARM_FPA_MVF = (0 << 20) | (1 << 15), - ARM_FPA_MNF = (1 << 20) | (1 << 15), - ARM_FPA_ABS = (2 << 20) | (1 << 15), - ARM_FPA_RND = (3 << 20) | (1 << 15), - ARM_FPA_SQT = (4 << 20) | (1 << 15), - ARM_FPA_LOG = (5 << 20) | (1 << 15), - ARM_FPA_LGN = (6 << 20) | (1 << 15), - ARM_FPA_EXP = (7 << 20) | (1 << 15), - ARM_FPA_SIN = (8 << 20) | (1 << 15), - ARM_FPA_COS = (9 << 20) | (1 << 15), - ARM_FPA_TAN = (10 << 20) | (1 << 15), - ARM_FPA_ASN = (11 << 20) | (1 << 15), - ARM_FPA_ACS = (12 << 20) | (1 << 15), - ARM_FPA_ATN = (13 << 20) | (1 << 15), - ARM_FPA_URD = (14 << 20) | (1 << 15), - ARM_FPA_NRM = (15 << 20) | (1 << 15), - - /* round modes */ - ARM_FPA_ROUND_NEAREST = 0, - ARM_FPA_ROUND_PINF = 1, - ARM_FPA_ROUND_MINF = 2, - ARM_FPA_ROUND_ZERO = 3, - - /* round precision */ - ARM_FPA_ROUND_SINGLE = 0, - ARM_FPA_ROUND_DOUBLE = 1, - - /* constants */ - ARM_FPA_CONST_0 = 8, - ARM_FPA_CONST_1_0 = 9, - ARM_FPA_CONST_2_0 = 10, - ARM_FPA_CONST_3_0 = 11, - ARM_FPA_CONST_4_0 = 12, - ARM_FPA_CONST_5_0 = 13, - ARM_FPA_CONST_0_5 = 14, - ARM_FPA_CONST_10 = 15, - - /* compares */ - ARM_FPA_CMF = 4, - ARM_FPA_CNF = 5, - ARM_FPA_CMFE = 6, - ARM_FPA_CNFE = 7, - - 
/* CPRT ops */ - ARM_FPA_FLT = 0, - ARM_FPA_FIX = 1, - ARM_FPA_WFS = 2, - ARM_FPA_RFS = 3, - ARM_FPA_WFC = 4, - ARM_FPA_RFC = 5 -}; - -#define ARM_DEF_FPA_LDF_STF(cond,post,ls,fptype,wback,basereg,fdreg,offset) \ - ((offset) >= 0? (offset)>>2: -(offset)>>2) | \ - ((1 << 8) | (fptype)) | \ - ((fdreg) << 12) | \ - ((basereg) << 16) | \ - ((ls) << 20) | \ - ((wback) << 21) | \ - (((offset) >= 0) << 23) | \ - ((wback) << 21) | \ - ((post) << 24) | \ - (6 << 25) | \ - ARM_DEF_COND(cond) - -/* FP load and stores */ -#define ARM_FPA_LDFS_COND(p,freg,base,offset,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_SINGLE,0,(base),(freg),(offset))) -#define ARM_FPA_LDFS(p,freg,base,offset) \ - ARM_FPA_LDFS_COND(p,freg,base,offset,ARMCOND_AL) - -#define ARM_FPA_LDFD_COND(p,freg,base,offset,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_LDR,ARM_FPA_DOUBLE,0,(base),(freg),(offset))) -#define ARM_FPA_LDFD(p,freg,base,offset) \ - ARM_FPA_LDFD_COND(p,freg,base,offset,ARMCOND_AL) - -#define ARM_FPA_STFS_COND(p,freg,base,offset,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_SINGLE,0,(base),(freg),(offset))) -#define ARM_FPA_STFS(p,freg,base,offset) \ - ARM_FPA_STFS_COND(p,freg,base,offset,ARMCOND_AL) - -#define ARM_FPA_STFD_COND(p,freg,base,offset,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_LDF_STF((cond),1,ARMOP_STR,ARM_FPA_DOUBLE,0,(base),(freg),(offset))) -#define ARM_FPA_STFD(p,freg,base,offset) \ - ARM_FPA_STFD_COND(p,freg,base,offset,ARMCOND_AL) - -#define ARM_DEF_FPA_CPDO_MONADIC(cond,op,dreg,sreg,round,prec) \ - (1 << 8) | (14 << 24) | \ - (op) | \ - ((sreg) << 0) | \ - ((round) << 5) | \ - ((dreg) << 12) | \ - ((prec) << 7) | \ - ARM_DEF_COND(cond) - -#define ARM_DEF_FPA_CPDO_DYADIC(cond,op,dreg,sreg1,sreg2,round,prec) \ - (1 << 8) | (14 << 24) | \ - (op) | \ - ((sreg1) << 16) | \ - ((sreg2) << 0) | \ - ((round) << 5) | \ - ((dreg) << 12) | \ - ((prec) << 7) | \ - ARM_DEF_COND(cond) - -#define ARM_DEF_FPA_CMP(cond,op,sreg1,sreg2) \ - (1 << 4) | (1 << 8) | (15 << 12) | \ - (1 << 20) | (14 << 24) | \ - (op) << 21 | \ - (sreg1) << 16 | \ - (sreg2) | \ - ARM_DEF_COND(cond) - -#define ARM_DEF_FPA_CPRT(cond,op,fn,fm,rd,ftype,round) \ - (1 << 4) | (1 << 8) | (14 << 24) | \ - (op) << 20 | \ - (fm) | \ - (fn) << 16 | \ - (rd) << 12 | \ - ((round) << 5) | \ - ((ftype) << 7) | \ - ARM_DEF_COND(cond) - - -#include "arm_fpamacros.h" - -#define ARM_FPA_RNDDZ_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_RND,(dreg),(sreg),ARM_FPA_ROUND_ZERO,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_RNDDZ(p,dreg,sreg) ARM_FPA_RNDD_COND(p,dreg,sreg,ARMCOND_AL) - -/* compares */ -#define ARM_FPA_FCMP_COND(p,op,sreg1,sreg2,cond) \ - ARM_EMIT(p, ARM_DEF_FPA_CMP(cond,op,sreg1,sreg2)) -#define ARM_FPA_FCMP(p,op,sreg1,sreg2) ARM_FPA_FCMP_COND(p,op,sreg1,sreg2,ARMCOND_AL) - -/* coprocessor register transfer */ -#define ARM_FPA_FLTD(p,fn,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_DOUBLE,ARM_FPA_ROUND_NEAREST)) -#define ARM_FPA_FLTS(p,fn,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FLT,(fn),0,(rd),ARM_FPA_ROUND_SINGLE,ARM_FPA_ROUND_NEAREST)) - -#define ARM_FPA_FIXZ(p,rd,fm) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_FIX,0,(fm),(rd),0,ARM_FPA_ROUND_ZERO)) - -#define ARM_FPA_WFS(p,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) - -#define ARM_FPA_RFS(p,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFS,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) - -#define 
ARM_FPA_WFC(p,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_WFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) - -#define ARM_FPA_RFC(p,rd) \ - ARM_EMIT(p, ARM_DEF_FPA_CPRT(ARMCOND_AL,ARM_FPA_RFC,0,0,(rd),0,ARM_FPA_ROUND_NEAREST)) - -#endif /* __MONO_ARM_FPA_CODEGEN_H__ */ - diff --git a/arm/arm-vfp-codegen.h b/arm/arm-vfp-codegen.h index c4c5e3e..8b56b00 100644 --- a/arm/arm-vfp-codegen.h +++ b/arm/arm-vfp-codegen.h @@ -8,7 +8,7 @@ #include "arm-codegen.h" enum { - /* FPA registers */ + /* VFP registers */ ARM_VFP_F0, ARM_VFP_F1, ARM_VFP_F2, diff --git a/arm/fpa_macros.th b/arm/fpa_macros.th deleted file mode 100644 index 036b2a0..0000000 --- a/arm/fpa_macros.th +++ /dev/null @@ -1,15 +0,0 @@ -/* -- <Op> -- */ - - -/* Fd := Rn <Op> Rm */ -#define ARM_FPA_<Op>D_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_<Op>,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_<Op>D(p, rd, rn, rm) \ - ARM_FPA_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL) - -#define ARM_FPA_<Op>S_COND(p, rd, rn, rm, cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_DYADIC(cond,ARM_FPA_<Op>,rd,rn,rm,ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_<Op>S(p, rd, rn, rm) \ - ARM_FPA_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL) - - diff --git a/arm/fpam_macros.th b/arm/fpam_macros.th deleted file mode 100644 index 15183c3..0000000 --- a/arm/fpam_macros.th +++ /dev/null @@ -1,14 +0,0 @@ -/* -- <Op> -- */ - - -/* Fd := <Op> Rm */ - -#define ARM_FPA_<Op>D_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_<Op>,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_DOUBLE)) -#define ARM_FPA_<Op>D(p,dreg,sreg) ARM_FPA_<Op>D_COND(p,dreg,sreg,ARMCOND_AL) - -#define ARM_FPA_<Op>S_COND(p,dreg,sreg,cond) \ - ARM_EMIT((p), ARM_DEF_FPA_CPDO_MONADIC((cond),ARM_FPA_<Op>,(dreg),(sreg),ARM_FPA_ROUND_NEAREST,ARM_FPA_ROUND_SINGLE)) -#define ARM_FPA_<Op>S(p,dreg,sreg) ARM_FPA_<Op>S_COND(p,dreg,sreg,ARMCOND_AL) - - diff --git a/arm/fpaops.sh b/arm/fpaops.sh deleted file mode 100755 index be19876..0000000 --- a/arm/fpaops.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -DYADIC="ADF MUF SUF RSF DVF RDF POW RPW RMF FML FDV FRD POL" -MONADIC="MVF MNF ABS RND SQT LOG EXP SIN COS TAN ASN ACS ATN URD NRM" - -# $1: opcode list -# $2: template -gen() { - for i in $1; do - sed "s/<Op>/$i/g" $2.th - done -} - -echo -e "/* Macros for FPA ops, auto-generated from template */\n" - -echo -e "\n/* dyadic */\n" -gen "$DYADIC" fpa_macros - -echo -e "\n/* monadic */\n" -gen "$MONADIC" fpam_macros - -echo -e "\n\n" - -echo -e "\n/* end generated */\n" -- cgit v1.1 From ab6a96ef346220433f9f7967b763a0453d9cbc66 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 14 May 2013 18:27:32 +0200 Subject: Enable hw division/remainder on mt in non-thumb mode as well.
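The ARM_SDIV/ARM_UDIV macros added below emit the ARMv7VE `sdiv`/`udiv` encodings directly. The ISA has no hardware remainder instruction, so a remainder has to be derived from the quotient; a minimal sketch of that idiom using the ARM_SDIV and ARM_MLS macros from this patch (the emission pointer `code` and the register choices are illustrative, not part of the patch):

    ARM_SDIV (code, ARMREG_IP, ARMREG_R0, ARMREG_R1);           /* ip = r0 / r1 */
    ARM_MLS (code, ARMREG_R2, ARMREG_IP, ARMREG_R1, ARMREG_R0); /* r2 = r0 - (ip * r1), i.e. r0 % r1 */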
--- arm/arm-codegen.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 31c4575..d94653e 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -1037,6 +1037,7 @@ typedef struct { #define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1) #define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1) +#define ARM_MLS(p, rd, rn, rm, ra) ARM_EMIT((p), (ARMCOND_AL << 28) | (0x6 << 20) | ((rd) << 16) | ((ra) << 12) | ((rm) << 8) | (0x9 << 4) | ((rn) << 0)) /* ARM V5 */ @@ -1095,6 +1096,13 @@ typedef union { #define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \ ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL) +/* ARMv7VE */ +#define ARM_SDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x1 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0))) +#define ARM_SDIV(p, rd, rn, rm) ARM_SDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL) + +#define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0))) +#define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL) + #ifdef __cplusplus } #endif -- cgit v1.1 From 2f56d471f089b8f514377ce501a0c1643652d639 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Fri, 24 May 2013 23:41:39 +0200 Subject: Merge some Nacl/ARM changes from https://github.com/igotti-google/mono/commit/65d8d68e8c81cf6adb1076de7a9425c84cab86a3. --- arm/arm-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index d94653e..5a3dba0 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -1031,7 +1031,11 @@ typedef struct { ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL) #define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs) +#ifdef __native_client_codegen__ +#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0) +#else #define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010) +#endif #define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010) #define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1) -- cgit v1.1 From 43b05e3c36d05526f7a9f3f8767569d026e4f1c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 15 Nov 2013 15:08:06 +0100 Subject: Fix the `nop` opcode on some MIPS-based Loongson CPUs. After much trouble building Mono in Debian/MIPS, @directhex narrowed it down to this issue: https://sourceware.org/ml/binutils/2009-11/msg00387.html So since some of the 2E and 2F versions of the Loongson CPUs break with a regular `sll zero, zero, 0` we need to issue an `or at, at, 0`. This makes sure we don't randomly deadlock or blow up when the CPU is under heavy load. Yes, really. 
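In emitter terms the patch below just swaps one filler encoding for another. A minimal sketch of the difference (hypothetical buffer setup; the macros are the ones from mips-codegen.h and are assumed to advance the buffer pointer as the other mono codegen headers do):

    guint32 buf [4], *c = buf;

    mips_sll (c, 0, 0, 0);            /* old nop: the all-zero word 0x00000000 */
    mips_or (c, mips_at, mips_at, 0); /* new nop: `or $at, $at, $zero`, safe on Loongson 2E/2F */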
--- mips/mips-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mips/mips-codegen.h b/mips/mips-codegen.h index dc4df7d..1dbd1c6 100644 --- a/mips/mips-codegen.h +++ b/mips/mips-codegen.h @@ -334,7 +334,7 @@ enum { /* misc and coprocessor ops */ #define mips_move(c,dest,src) mips_addu(c,dest,src,mips_zero) #define mips_dmove(c,dest,src) mips_daddu(c,dest,src,mips_zero) -#define mips_nop(c) mips_sll(c,0,0,0) +#define mips_nop(c) mips_or(c,mips_at,mips_at,0) #define mips_break(c,code) mips_emit32(c, ((code)<<6)|13) #define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16) #define mips_mflo(c,dest) mips_format_r(c,0,0,0,dest,0,18) -- cgit v1.1 From 4a25d5fa1811be15c62979993cd1a37c2891d0a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 23 Nov 2013 18:26:55 +0100 Subject: Fix the encoding of x86_imul_reg_mem_imm. --- x86/x86-codegen.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ced466e..ad6282f 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -1039,7 +1039,7 @@ typedef union { } else { \ x86_codegen_pre(&(inst), 6); \ *(inst)++ = (unsigned char)0x69; \ - x86_reg_emit ((inst), (reg), (mem)); \ + x86_mem_emit ((inst), (reg), (mem)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) -- cgit v1.1 From 21ca1bad7d0447bb5d420a58128e1c2733635efa Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Wed, 11 Dec 2013 11:13:14 -0500 Subject: [arch]Add cvtsi2ss to amd64 codegen. --- amd64/amd64-codegen.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h index 9dd3269..3c40d9d 100644 --- a/amd64/amd64-codegen.h +++ b/amd64/amd64-codegen.h @@ -1054,6 +1054,10 @@ typedef union { #define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) +#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) + +#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) + #define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) #define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) -- cgit v1.1 From 12741090edd2230bfd0fac498af3e304680380b4 Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Tue, 1 Apr 2014 18:39:05 +0000 Subject: [jit] Implement support for atomic intrinsics on arm. 
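The ARM_DMB/ARM_LDREX_REG/ARM_STREX_REG macros added below are the raw building blocks. As an illustration only (not code from this patch), an atomic-exchange loop emitted with them could look like the following, with hypothetical register roles (r1 = new value, r2 = address, r0/r3 = scratch) and the backward branch elided; ARM_CMP_REG_IMM8 is assumed to be one of the generated compare macros from arm-codegen.h:

    ARM_DMB (code, ARM_DMB_SY);                            /* full barrier before the operation */
    /* retry: */
    ARM_LDREX_REG (code, ARMREG_R0, ARMREG_R2);            /* r0 = [r2], opens the exclusive monitor */
    ARM_STREX_REG (code, ARMREG_R3, ARMREG_R1, ARMREG_R2); /* try [r2] = r1; r3 = 0 only on success */
    ARM_CMP_REG_IMM8 (code, ARMREG_R3, 0);                 /* branch back to retry: if r3 != 0 (bne) */
    ARM_DMB (code, ARM_DMB_SY);                            /* full barrier after */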
--- arm/arm-codegen.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arm/arm-codegen.h b/arm/arm-codegen.h index 5a3dba0..d4d7f7c 100644 --- a/arm/arm-codegen.h +++ b/arm/arm-codegen.h @@ -1107,6 +1107,18 @@ typedef union { #define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0))) #define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL) +/* ARMv7 */ + +typedef enum { + ARM_DMB_SY = 0xf, +} ArmDmbFlags; + +#define ARM_DMB(p, option) ARM_EMIT ((p), ((0xf << 28) | (0x57 << 20) | (0xf << 16) | (0xf << 12) | (0x0 << 8) | (0x5 << 4) | ((option) << 0))) + +#define ARM_LDREX_REG(p, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x1 << 20) | ((rn) << 16) | ((rt) << 12)) | (0xf << 8) | (0x9 << 4) | 0xf << 0) + +#define ARM_STREX_REG(p, rd, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x0 << 20) | ((rn) << 16) | ((rd) << 12)) | (0xf << 8) | (0x9 << 4) | ((rt) << 0)) + #ifdef __cplusplus } #endif -- cgit v1.1 From 1d58ec09524d6f4ce37f39698e68fb45a3c0231b Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 19 Apr 2014 17:03:21 +0200 Subject: [arm64] Add basic port infrastructure. --- Makefile.am | 2 +- arm64/Makefile.am | 0 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 arm64/Makefile.am diff --git a/Makefile.am b/Makefile.am index e7700ed..3d68794 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,4 +1,4 @@ -DIST_SUBDIRS = x86 ppc sparc arm s390x amd64 ia64 mips +DIST_SUBDIRS = x86 ppc sparc arm arm64 s390x amd64 ia64 mips AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) diff --git a/arm64/Makefile.am b/arm64/Makefile.am new file mode 100644 index 0000000..e69de29 -- cgit v1.1 From 62b813772cfa4af873a278c39dd1f01dc6e50c2e Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 19 Apr 2014 20:16:47 +0200 Subject: [arm64] Add JIT support. --- arm64/arm64-codegen.h | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 arm64/arm64-codegen.h diff --git a/arm64/arm64-codegen.h b/arm64/arm64-codegen.h new file mode 100644 index 0000000..259ff96 --- /dev/null +++ b/arm64/arm64-codegen.h @@ -0,0 +1,3 @@ +#include "../../../../mono-extensions/mono/arch/arm64/arm64-codegen.h" + + -- cgit v1.1 From edeeadda807c9189ad6b7cdd0f221c355ad95e52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 29 Apr 2014 16:56:12 +0200 Subject: Add .gitignore file in mono/arch/arm64. --- arm64/.gitignore | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 arm64/.gitignore diff --git a/arm64/.gitignore b/arm64/.gitignore new file mode 100644 index 0000000..13efac7 --- /dev/null +++ b/arm64/.gitignore @@ -0,0 +1,6 @@ +/ +/Makefile +/Makefile.in +/*.o +/*.lo +/.deps -- cgit v1.1 From a9db0d5b41d17cb7ff5788a63ce0eee1e01652b3 Mon Sep 17 00:00:00 2001 From: Neale Ferguson Date: Tue, 3 Jun 2014 11:52:00 -0400 Subject: Architectural level set to z10 instruction set --- s390x/s390x-codegen.h | 212 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 199 insertions(+), 13 deletions(-) diff --git a/s390x/s390x-codegen.h b/s390x/s390x-codegen.h index 3677b2d..47e6564 100644 --- a/s390x/s390x-codegen.h +++ b/s390x/s390x-codegen.h @@ -6,6 +6,7 @@ #define S390X_H #include <glib.h> #include <assert.h> +#include <limits.h> #define FLOAT_REGS 2 /* No. float registers for parms */ #define GENERAL_REGS 5 /* No.
general registers for parms */ @@ -136,13 +137,16 @@ typedef enum { s390_fpc = 256, } S390SpecialRegister; -#define s390_is_imm16(val) ((glong)val >= (glong)-(1<<15) && \ - (glong)val <= (glong)((1<<15)-1)) -#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= 65535) +#define s390_is_imm16(val) ((glong)val >= (glong) SHRT_MIN && \ + (glong)val <= (glong) SHRT_MAX) +#define s390_is_imm32(val) ((glong)val >= (glong) INT_MIN && \ + (glong)val <= (glong) INT_MAX) +#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= (glong) USHRT_MAX) +#define s390_is_uimm32(val) ((glong)val >= 0 && (glong)val <= (glong) UINT_MAX) #define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575) #define s390_is_imm20(val) ((glong)val >= -524288 && (glong)val <= 524287) -#define s390_is_imm12(val) ((glong)val >= (glong)-(1<<11) && \ - (glong)val <= (glong)((1<<11)-1)) +#define s390_is_imm12(val) ((glong)val >= (glong)-4096 && \ + (glong)val <= (glong)4095) #define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095) #define STK_BASE s390_r15 @@ -349,7 +353,36 @@ typedef struct { short i2; char xx; char op2; -} RIE_Format; +} RIE_Format_1; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + short i2; + char m2 : 4; + char xx : 4; + char op2; +} RIE_Format_2; + +typedef struct { + char op1; + char r1 : 4; + char r3 : 4; + short d; + char i; + char op2; +} RIE_Format_3; + +typedef struct { + char op1; + char r1 : 4; + char yy : 4; + short i2; + char m3 : 4; + char xx : 4; + char op2; +} RIE_Format_4; typedef struct { char op1; @@ -427,12 +460,22 @@ typedef struct { typedef struct { short op; - char b1 : 4; + short tb1 : 4; short d1 : 12; - char b2 : 4; + short b2 : 4; short d2 : 12; } __attribute__ ((packed)) SSE_Format; +typedef struct { + short op; + char r3 : 4; + char o2 : 4; + short b1 : 4; + short d1 : 12; + short b2 : 4; + short d2 : 12; +} __attribute__ ((packed)) SSF_Format; + #define s390_emit16(c, x) do \ { \ *((guint16 *) c) = (guint16) x; \ @@ -509,12 +552,33 @@ typedef struct { #define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff))) -#define S390_RIE(c,opc,g1,g3,m2) do \ +#define S390_RIE_1(c,opc,g1,g3,m2) do \ { \ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \ } while (0) +#define S390_RIE_2(c,opc,g1,g2,m3,v) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \ + s390_emit16(c, (v)); \ + s390_emit16(c, ((m2) << 12 | (opc & 0xff))); \ +} while (0) + +#define S390_RIE_3(c,opc,g1,i,m3,d) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | m3)); \ + s390_emit16(c, (d)); \ + s390_emit16(c, ((i) << 8 | (opc & 0xff))); \ +} while (0) + +#define S390_RIE_4(c,opc,g1,i2,m3) do \ +{ \ + s390_emit16(c, ((opc & 0xff00) | (g1) << 4); \ + s390_emit16(c, (i2)); \ + s390_emit16(c, ((m3) << 12 | (opc & 0xff))); \ +} while (0) + #define S390_RIL_1(c,opc,g1,m2) do \ { \ s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \ @@ -527,6 +591,20 @@ typedef struct { s390_emit32(c, m2); \ } while (0) +#define S390_RIS(c,opc,r,i,m3,b,d) do \ +{ \ + s390_emit16(c, ((opc, & 0xff00) | (r1) << 4) | (r2)); \ + s390_emit16(c, ((b) << 12) | (d)); \ + s390_emit16(c, ((i) << 4) | ((opc) & 0xff)); \ +} + +#define S390_RRS(c,opc,r1,r2,m3,b,d) do \ +{ \ + s390_emit16(c, ((opc, & 0xff00) | (r1) << 4) | (r2)); \ + s390_emit16(c, ((b) << 12) | (d)); \ + s390_emit16(c, ((m3) << 12) | ((opc) & 0xff)); \ +} + #define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | 
(m2) << 16 | (s1) << 12 | ((p1) & 0xfff))); #define S390_SIY(c,opc,s1,p1,m2) do \ @@ -573,23 +651,60 @@ typedef struct { s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ } while (0) +#define S390_SSF(c,opc,r3,s1,p1,s2,p2) do \ +{ \ + s390_emit16(c, (((opc) & 0xff00) << 8) | ((r3) << 4) | \ + ((opc) & 0xf)); \ + s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \ + s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \ +} while (0) + #define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d) #define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d) #define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2) #define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2) +#define s390_afi(c, r, v) S390_RIL_1(c, 0xc29, r, v); #define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d) #define s390_agf(c, r, x, b, d) S390_RXY(c, 0xe318, r, x, b, d) +#define s390_agfi(c, r, v) S390_RIL_1(c, 0xc28, r, v) +#define s390_afgr(c, r1, r2) S390_RRE(c, 0xb918, r1, r2) #define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v) +#define s390_aghik(c, r, v) S390_RIE_1(c, 0xecd9, r, v) #define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2) +#define s390_agrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e8, r1, r2, r3) +#define s390_agsi(c, r, v) S390_SIY(c, 0xeb7a, r v) +#define s390_ahhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9c8, r1, r2, r3) +#define s390_ahhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9d8, r1, r2, r3) #define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v) +#define s390_ahik(c, r, v) S390_RIE_1(c, 0xecd8, r, v) +#define s390_ahy(c, r, x, b, d) S390_RXY(c, 0xe37a, r, b, d) +#define s390_aih(c, r, v) S390_RIL_1(c, 0xcc8, r, v) +#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) +#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d) +#define s390_alcg(c, r, x, b, d) S390_RXY(c, 0xe388, r, x, b, d) #define s390_alcgr(c, r1, r2) S390_RRE(c, 0xb988, r1, r2) #define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2) -#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d) +#define s390_alfi(c, r, v) S390_RIL_1(c, 0xc2b, r, v) #define s390_alg(c, r, x, b, d) S390_RXY(c, 0xe30a, r, x, b, d) #define s390_algf(c, r, x, b, d) S390_RXY(c, 0xe31a, r, x, b, d) +#define s390_algfi(c, r, v) S390_RIL_1(c, 0xc2a, r, v) +#define s390_algfr(c, r1, r2) S390_RRE(c, 0xb91a, r1, r2) +#define s390_alghsik(c, r, v) S390_RIE_1(c, 0xecd8, r, v) #define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2) +#define s390_algsi(c, r, v) S390_SIY(c, 0xeb7e, r, v) +#define s390_alhhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9ca, r1, r2, r3) +#define s390_alhhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9da, r1, r2, r3) +#define s390_alhsik(c, r, v) S390_RIE_1(c, 0xecda, r, v) #define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2) +#define s390_alrk(c, r1, r2) S390_RRF(c, 0xb9fa, r1, r2) +#define s390_alsi(c, r, v) S390_SIY(c, 0xeb6e, r, v) +#define s390_alsih(c, r, v) S390_RIL_1(c, 0xcca, r, v) +#define s390_alsihn(c, r, v) S390_RIL_1(c, 0xccb, r, v) +#define s390_aly(c, r, x, b, d) S390_RXY(c, 0xe35e, r, x, b, d) #define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2) +#define s390_ark(c, r1, r2, r3) S390_RRF_1(c, 0xb9f8, r1, r2, r3) +#define s390_asi(c, r, v) S390_SIY(c, 0xeb6a, r, v) +#define s390_ay(c, r, x, b, d) S390_RXY(c, 0xe35a, r, x, b, d) #define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2) #define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2) #define s390_bctrg(c, r1, r2) S390_RRE(c, 0xb946, r1, r2) @@ -610,19 +725,46 @@ typedef struct { #define s390_cdsg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb3e, r1, r2, b, d) #define s390_cdsy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb31, 
r1, r2, b, d) #define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2) +#define s390_cegbr(c, r1, r2) S390_RRE(c, 0xb3a4, r1, r2) #define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2) +#define s390_cfi(c, r, v) S390_RIL_1(c, 0xc2d, r, v) #define s390_cgdbr(c, r1, m, r2) S390_RRF_2(c, 0xb3a9, r1, m, r2) #define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d) +#define s390_cgfi(c, r, v) S390_RIL_1(c, 0xc2c, r, v) +#define s390_cgfrl(c, r, v) S390_RIL_1(c, 0xc6c, r, v) #define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i) +#define s390_cgib(c, r, i, m, b, d) S390_RIS(c, 0xecfc, r, i, m, b, d) +#define s390_cgij(c, r, i, m, d) S390_RIE_3(c, 0xec7c, r, i, m, d) +#define s390_cgit(c, r, i, m) S390_RIE_4(c, 0xec70, r, i m); #define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2) +#define s390_cgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece4, r1, r2, m3, b, d) +#define s390_cgrj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec64, r1, r2, m3, v) +#define s390_cgrl(c, r, v) S390_RIL_1(c, 0xc68, r, v) #define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i) +#define s390_cib(c, r, i, m, b, d) S390_RIS(c, 0xecfe, r, i, m, b, d) +#define s390_cij(c, r, i, m, d) S390_RIE_3(c, 0xec7e, r, i, m, d) +#define s390_cit(c, r, i, m) S390_RIE_4(c, 0xec72, r, i m); #define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d) #define s390_clg(c, r, x, b, d) S390_RXY(c, 0xe321, r, x, b, d) +#define s390_clgib(c, r, i, m, b, d) S390_RIS(c, 0xecfd, r, i, m, b, d) +#define s390_clgij(c, r, i, b) S390_RIE_3(c, 0xec7d, r, i, m, d) #define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2) +#define s390_clgrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec65, r1, r2, m, v) +#define s390_clgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece5, r1, r2, m3, b, d) +#define s390_clib(c, r, i, m, b, d) S390_RIS(c, 0xecff, r, i, m, b, d) +#define s390_clij(c, r, i, b) S390_RIE_3(c, 0xec7f, r, i, m, d) #define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2) +#define s390_clrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf7, r1, r2, m3, b, d) +#define s390_clrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec77, r1, r2, m, v) #define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2) +#define s390_crb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf6, r1, r2, m3, b, d) +#define s390_crj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec76, r1, r2, m3, v) +#define s390_crl(c, r, v) S390_RIL_1(c, 0xc6d, r, v) +#define s390_crt(c, r1, r2, m3) S390_RRF_2(c, 0xb972, r1, r2, m3); +#define s390_cgrt(c, r1, r2, m3) S390_RRF_2(c, 0xb960, r1, r2, m3); #define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d) #define s390_csg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb30, r1, r2, b, d) +#define s390_csst(c, d1, b1, d2, b2, r) S390_SSF(c, 0xc82, b1, d1, b2, d2, r) #define s390_csy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb14, r1, r2, b, d) #define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2) #define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2) @@ -637,6 +779,12 @@ typedef struct { #define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d) #define s390_icmy(c, r, x, b, d) S390_RXY(c, 0xeb81, r, x, b, d) #define s390_icy(c, r, x, b, d) S390_RXY(c, 0xe373, r, x, b, d) +#define s390_iihf(c, r, v) S390_RIL_1(c, 0xc08, r, v) +#define s390_iihh(c, r, v) S390_RI(c, 0xa50, r, v) +#define s390_iihl(c, r, v) S390_RI(c, 0xa51, r, v) +#define s390_iilf(c, r, v) S390_RIL_1(c, 0xc09, r, v) +#define s390_iilh(c, r, v) S390_RI(c, 0xa52, r, v) +#define s390_iill(c, r, v) S390_RI(c, 0xa53, r, v) #define s390_j(c,d) s390_brc(c, S390_CC_UN, d) #define s390_jc(c, m, d) s390_brc(c, m, d) #define s390_jcl(c, m, d) s390_brcl(c, m, d) @@ -690,23 
+838,28 @@ typedef struct { #define s390_ldy(c, r, x, b, d) S390_RXY(c, 0xed65, r, x, b, d) #define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d) #define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2) +#define s390_ldgr(c, r1, r2) S390_RRE(c, 0xb3c1, r1, r2) #define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2) #define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d) #define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2) #define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2) #define s390_ley(c, r, x, b, d) S390_RXY(c, 0xed64, r, x, b, d) +#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) #define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d) #define s390_lgbr(c, r1, r2) S390_RRE(c, 0xb906, r1, r2) -#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d) +#define s390_lgdr(c, r1, r2) S390_RRE(c, 0xb3cd, r1, r2) #define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d) +#define s390_lgfi(c, r, v) S390_RIL_1(c, 0xc01, r, v) +#define s390_lgfrl(c, r1, d) S390_RIL_1(c, 0xc4c, r1, d) #define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2) #define s390_lgh(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) #define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v) +#define s390_lghr(c, r1, r2) S390_RRE(c, 0xb907, r1, r2) #define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2) +#define s390_lgrl(c, r1, d) S390_RIL_1(c, 0xc48, r1, d) #define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d) #define s390_lhr(c, r1, r2) S390_RRE(c, 0xb927, r1, r2) #define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d) -#define s390_lghr(c, r1, r2) S390_RRE(c, 0xb907, r1, r2) #define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v) #define s390_lhy(c, r, x, b, d) S390_RXY(c, 0xe378, r, x, b, d) #define s390_llcr(c, r1, r2) S390_RRE(c, 0xb994, r1, r2) @@ -717,6 +870,12 @@ typedef struct { #define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d) #define s390_llghr(c, r1, r2) S390_RRE(c, 0xb985, r1, r2) #define s390_llhr(c, r1, r2) S390_RRE(c, 0xb995, r1, r2) +#define s390_llihf(c, r, v) S390_RIL_1(c, 0xc0e, r, v) +#define s390_llihh(c, r, v) S390_RI(c, 0xa5c, r, v) +#define s390_llihl(c, r, v) S390_RI(c, 0xa5d, r, v) +#define s390_llilf(c, r, v) S390_RIL_1(c, 0xc0f, r, v) +#define s390_llilh(c, r, v) S390_RI(c, 0xa5e, r, v) +#define s390_llill(c, r, v) S390_RI(c, 0xa5f, r, v) #define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d) #define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d) #define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2) @@ -726,6 +885,7 @@ typedef struct { #define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2) #define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2) #define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2) +#define s390_lrl(c, r1, d) S390_RIL_1(c, 0xc4d, r1, d) #define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2) #define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2) #define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2) @@ -734,24 +894,44 @@ typedef struct { #define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d) #define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2) #define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2) +#define s390_mfy(c, r, x, b, d) S390_RXY(c, 0xe35c, r, x, b, d) #define s390_mlgr(c, r1, r2) S390_RRE(c, 0xb986, r1, r2) #define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2) #define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2) #define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d) +#define s390_msi(c, r, v) S390_RIL_1(c, 0xc21, r, v) #define s390_msgfr(c, r1, r2) S390_RRE(c, 
0xb91c, r1, r2) +#define s390_msgi(c, r, v) S390_RIL_1(c, 0xc20, r, v) #define s390_msgr(c, r1, r2) S390_RRE(c, 0xb90c, r1, r2) #define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2) #define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2) #define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2) #define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, d2, b2) #define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d) +#define s390_nc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd4, l, b1, d1, b2, d2) #define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d) #define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2) +#define s390_ngrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e4, r1, r2, r3) +#define s390_ni(c, b, d, v) S390_SI(c, 0x94, b, d, v) +#define s390_nihf(c, r, v) S390_RIL_1(c, 0xc0a, r, v) +#define s390_nihh(c, r, v) S390_RI(c, 0xa54, r, v) +#define s390_nihl(c, r, v) S390_RI(c, 0xa55, r, v) +#define s390_nilf(c, r, v) S390_RIL_1(c, 0xc0b, r, v) #define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v) #define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v) +#define s390_niy(c, b, d, v) S390_SIY(c, 0xeb54, b, d, v) #define s390_nop(c) S390_RR(c, 0x07, 0x0, 0) #define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2) +#define s390_nrk(c, r1, r2) S390_RRF_1(c, 0xb9f4, r1, r2) +#define s390_ny(c, r, x, b, d) S390_RRY(c, 0xe354, r1, r2) #define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d) +#define s390_oihf(c, r, v) S390_RIL_1(c, 0xc0c, r, v) +#define s390_oihh(c, r, v) S390_RI(c, 0xa58, r, v) +#define s390_oihl(c, r, v) S390_RI(c, 0xa59, r, v) +#define s390_oilf(c, r, v) S390_RIL_1(c, 0xc0d, r, v) +#define s390_oilh(c, r, v) S390_RI(c, 0xa5a, r, v) +#define s390_oill(c, r, v) S390_RI(c, 0xa5b` r, v) +#define s390_oiy(c, b, d, v) S390_SIY(c, 0xeb56 b, d, v) #define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d) #define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2) #define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2) @@ -762,16 +942,19 @@ typedef struct { #define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d) #define s390_sgf(c, r, x, b, d) S390_RXY(c, 0xe319, r, x, b, d) #define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2) +#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) #define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d) #define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d) #define s390_slbg(c, r, x, b, d) S390_RXY(c, 0xe389, r, x, b, d) #define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2) #define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2) -#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d) #define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d) #define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d) +#define s390_slfi(c, r, v) S390_RIL_1(c, 0xc25, r, v) #define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d) #define s390_slgf(c, r, x, b, d) S390_RXY(c, 0xe31b, r, x, b, d) +#define s390_slgfr(c, r1, r2) S390_RRE(c, 0xb91b, r1, r2) +#define s390_slgfi(c, r, v) S390_RIL_1(c, 0xc24, r, v) #define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2) #define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d) #define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d) @@ -805,7 +988,10 @@ typedef struct { #define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d) #define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d) #define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d) +#define s390_xihf(c, r, v) S390_RIL_1(c, 0xc06, r, v) +#define s390_xilf(c, r, v) S390_RIL_1(c, 0xc07, r, v) 
#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d) #define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2) #define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2) +#define s390_xy(c, r, x, b, d) S390_RXY(c, 0xe357, r, x, b, d) #endif -- cgit v1.1 From 69d89956fcc24cec955246588269cb7c8012b7cb Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Mon, 1 Sep 2014 13:25:07 -0400 Subject: [runtime] Remove the interpreter. --- Makefile.am | 14 -------------- ppc/Makefile.am | 4 ---- x86/Makefile.am | 4 ---- 3 files changed, 22 deletions(-) diff --git a/Makefile.am b/Makefile.am index 3d68794..8741687 100644 --- a/Makefile.am +++ b/Makefile.am @@ -2,24 +2,10 @@ DIST_SUBDIRS = x86 ppc sparc arm arm64 s390x amd64 ia64 mips AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) -if INTERP_SUPPORTED -SUBDIRS = $(arch_target) -else if ARM # arm needs to build some stuff even in JIT mode SUBDIRS = $(arch_target) endif -endif - -if INTERP_SUPPORTED - -noinst_LTLIBRARIES = libmonoarch.la - -libmonoarch_la_SOURCES = unknown.c - -libmonoarch_la_LIBADD = $(arch_target)/libmonoarch-$(arch_target).la - -endif EXTRA_DIST = ChangeLog diff --git a/ppc/Makefile.am b/ppc/Makefile.am index a4e2d5d..667ad25 100644 --- a/ppc/Makefile.am +++ b/ppc/Makefile.am @@ -1,5 +1,3 @@ -if INTERP_SUPPORTED - AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-ppc.la @@ -7,5 +5,3 @@ noinst_LTLIBRARIES = libmonoarch-ppc.la libmonoarch_ppc_la_SOURCES = tramp.c ppc-codegen.h noinst_PROGRAMS = test - -endif diff --git a/x86/Makefile.am b/x86/Makefile.am index 9778237..e88506e 100644 --- a/x86/Makefile.am +++ b/x86/Makefile.am @@ -1,9 +1,5 @@ -if INTERP_SUPPORTED - AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) noinst_LTLIBRARIES = libmonoarch-x86.la libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h - -endif -- cgit v1.1 From b8e69265771d2d730847add35620628ff003aed1 Mon Sep 17 00:00:00 2001 From: Rodrigo Kumpera Date: Tue, 9 Sep 2014 09:14:37 -0400 Subject: [cleanup] Remove more old files. --- amd64/Makefile.am | 7 +- amd64/tramp.c | 1054 ----------------------------------------------------- ppc/Makefile.am | 8 +- ppc/test.c | 35 -- ppc/tramp.c | 895 --------------------------------------------- x86/Makefile.am | 6 +- x86/test.c | 225 ------------ x86/tramp.c | 545 --------------------------- 8 files changed, 3 insertions(+), 2772 deletions(-) delete mode 100644 amd64/tramp.c delete mode 100644 ppc/test.c delete mode 100644 ppc/tramp.c delete mode 100644 x86/test.c delete mode 100644 x86/tramp.c diff --git a/amd64/Makefile.am b/amd64/Makefile.am index 3c72826..47daaaf 100644 --- a/amd64/Makefile.am +++ b/amd64/Makefile.am @@ -1,7 +1,2 @@ - -AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir) - -noinst_LTLIBRARIES = libmonoarch-amd64.la - -libmonoarch_amd64_la_SOURCES = tramp.c amd64-codegen.h +EXTRA_DIST = amd64-codegen.h diff --git a/amd64/tramp.c b/amd64/tramp.c deleted file mode 100644 index 6dbec93..0000000 --- a/amd64/tramp.c +++ /dev/null @@ -1,1054 +0,0 @@ -/* - * Create trampolines to invoke arbitrary functions. - * - * Copyright (C) Ximian Inc. - * - * Author: - * Zalman Stern - * Based on code by: - * Paolo Molaro (lupus@ximian.com) - * Dietmar Maurer (dietmar@ximian.com) - * - * To understand this code, one will want to the calling convention section of the ABI sepc at: - * http://x86-64.org/abi.pdf - * and the AMD64 architecture docs found at amd.com . 
- */ - -#include "config.h" -#include -#include -#include "amd64-codegen.h" -#include "mono/metadata/class.h" -#include "mono/metadata/tabledefs.h" -#include "mono/interpreter/interp.h" -#include "mono/metadata/appdomain.h" -#include "mono/metadata/marshal.h" - -/* - * The resulting function takes the form: - * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments); - */ -#define FUNC_ADDR_POS 8 -#define RETVAL_POS 12 -#define THIS_POS 16 -#define ARGP_POS 20 -#define LOC_POS -4 - -#define ARG_SIZE sizeof (stackval) - -#define MAX_INT_ARG_REGS 6 -#define MAX_FLOAT_ARG_REGS 8 - -// TODO get these right. They are upper bounds anyway, so it doesn't much matter. -#define PUSH_INT_STACK_ARG_SIZE 16 -#define MOVE_INT_REG_ARG_SIZE 16 -#define PUSH_FLOAT_STACK_ARG_SIZE 16 -#define MOVE_FLOAT_REG_ARG_SIZE 16 -#define COPY_STRUCT_STACK_ARG_SIZE 16 - -/* Maps an argument number (starting at 0) to the register it is passed in (if it fits). - * E.g. int foo(int bar, int quux) has the foo arg in RDI and the quux arg in RSI - * There is no such map for floating point args as they go in XMM0-XMM7 in order and thus the - * index is the register number. - */ -static int int_arg_regs[] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 }; - -/* This next block of code resolves the ABI rules for passing structures in the argument registers. - * These basically amount to "Use up to two registers if they are all integer or all floating point. - * If the structure is bigger than two registers or would be in one integer register and one floating point, - * it is passed in memory instead. - * - * It is possible this code needs to be recursive to be correct in the case when one of the structure members - * is itself a structure. - * - * The 80-bit floating point stuff is ignored. 
- */ -typedef enum { - ARG_IN_MEMORY, - ARG_IN_INT_REGS, - ARG_IN_FLOAT_REGS -} struct_arg_type; - -static struct_arg_type compute_arg_type(MonoType *type) -{ - guint32 simpletype = type->type; - - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - return ARG_IN_INT_REGS; - break; - case MONO_TYPE_VALUETYPE: { - if (type->data.klass->enumtype) - return ARG_IN_INT_REGS; - return ARG_IN_MEMORY; - break; - } - case MONO_TYPE_R4: - case MONO_TYPE_R8: - return ARG_IN_FLOAT_REGS; - break; - default: - g_error ("Can't trampoline 0x%x", type->type); - } - - return ARG_IN_MEMORY; -} - -static struct_arg_type value_type_info(MonoClass *klass, int *native_size, int *regs_used, int *offset1, int *size1, int *offset2, int *size2) -{ - MonoMarshalType *info = mono_marshal_load_type_info (klass); - - *native_size = info->native_size; - - if (info->native_size > 8 || info->num_fields > 2) - { - *regs_used = 0; - *offset1 = -1; - *offset2 = -1; - return ARG_IN_MEMORY; - } - - if (info->num_fields == 1) - { - struct_arg_type result = compute_arg_type(info->fields[0].field->type); - if (result != ARG_IN_MEMORY) - { - *regs_used = 1; - *offset1 = info->fields[0].offset; - *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); - } - else - { - *regs_used = 0; - *offset1 = -1; - } - - *offset2 = -1; - return result; - } - - struct_arg_type result1 = compute_arg_type(info->fields[0].field->type); - struct_arg_type result2 = compute_arg_type(info->fields[0].field->type); - - if (result1 == result2 && result1 != ARG_IN_MEMORY) - { - *regs_used = 2; - *offset1 = info->fields[0].offset; - *size1 = mono_marshal_type_size (info->fields[0].field->type, info->fields[0].mspec, NULL, 1, 1); - *offset2 = info->fields[1].offset; - *size2 = mono_marshal_type_size (info->fields[1].field->type, info->fields[1].mspec, NULL, 1, 1); - return result1; - } - - return ARG_IN_MEMORY; -} - -MonoPIFunc -mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -{ - unsigned char *p, *code_buffer; - guint32 stack_size = 0, code_size = 50; - guint32 arg_pos, simpletype; - int i; - static GHashTable *cache = NULL; - MonoPIFunc res; - - guint32 int_arg_regs_used = 0; - guint32 float_arg_regs_used = 0; - guint32 next_int_arg_reg = 0; - guint32 next_float_arg_reg = 0; - /* Indicates that the return value is filled in inside the called function. */ - int retval_implicit = 0; - char *arg_in_reg_bitvector; /* A set index by argument number saying if it is in a register - (integer or floating point according to type) */ - - if (!cache) - cache = g_hash_table_new ((GHashFunc)mono_signature_hash, - (GCompareFunc)mono_metadata_signature_equal); - - if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig))) - return res; - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) { - int_arg_regs_used++; - code_size += MOVE_INT_REG_ARG_SIZE; - } - - if (sig->hasthis) { - int_arg_regs_used++; - code_size += MOVE_INT_REG_ARG_SIZE; - } - - /* Run through stuff to calculate code size and argument bytes that will be pushed on stack (stack_size). 
*/ - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i]->type; -enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_I8: - if (int_arg_regs_used++ > MAX_INT_ARG_REGS) { - stack_size += 8; - code_size += PUSH_INT_STACK_ARG_SIZE; - } - else - code_size += MOVE_INT_REG_ARG_SIZE; - break; - case MONO_TYPE_VALUETYPE: { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - if (arg_type == ARG_IN_INT_REGS && - (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS) - { - code_size += MOVE_INT_REG_ARG_SIZE; - int_arg_regs_used += regs_used; - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS && - (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS) - { - code_size += MOVE_FLOAT_REG_ARG_SIZE; - float_arg_regs_used += regs_used; - break; - } - - /* Else item is in memory. */ - - stack_size += size + 7; - stack_size &= ~7; - code_size += COPY_STRUCT_STACK_ARG_SIZE; - - break; - } - case MONO_TYPE_R4: - case MONO_TYPE_R8: - if (float_arg_regs_used++ > MAX_FLOAT_ARG_REGS) { - stack_size += 8; - code_size += PUSH_FLOAT_STACK_ARG_SIZE; - } - else - code_size += MOVE_FLOAT_REG_ARG_SIZE; - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - /* - * FIXME: take into account large return values. - * (Comment carried over from IA32 code. Not sure what it means :-) - */ - - code_buffer = p = alloca (code_size); - - /* - * Standard function prolog. - */ - amd64_push_reg (p, AMD64_RBP); - amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); - /* - * and align to 16 byte boundary... - */ - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - retval_implicit = 1; - } - } - - if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { - /* Push the retval register so it is saved across the call. It will be addressed via RBP later. */ - amd64_push_reg (p, AMD64_RSI); - stack_size += 8; - } - - /* Ensure stack is 16 byte aligned when entering called function as required by calling convention. - * Getting this wrong results in a general protection fault on an SSE load or store somewhere in the - * code called under the trampoline. - */ - if ((stack_size & 15) != 0) - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 16 - (stack_size & 15)); - - /* - * On entry to generated function: - * RDI has target function address - * RSI has return value location address - * RDX has this pointer address - * RCX has the pointer to the args array. - * - * Inside the stub function: - * R10 holds the pointer to the args - * R11 holds the target function address. - * The return value address is pushed on the stack. - * The this pointer is moved into the first arg register at the start. 
- * - * Optimization note: we could keep the args pointer in RCX and then - * load over itself at the end. Ditto the callee addres could be left in RDI in some cases. - */ - - /* Move args pointer to temp register. */ - amd64_mov_reg_reg (p, AMD64_R10, AMD64_RCX, 8); - amd64_mov_reg_reg (p, AMD64_R11, AMD64_RDI, 8); - - /* First args register gets return value pointer, if need be. - * Note that "byref" equal true means the called function returns a pointer. - */ - if (retval_implicit) { - amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RSI, 8); - next_int_arg_reg++; - } - - /* this pointer goes in next args register. */ - if (sig->hasthis) { - amd64_mov_reg_reg (p, int_arg_regs[next_int_arg_reg], AMD64_RDX, 8); - next_int_arg_reg++; - } - - /* - * Generate code to handle arguments in registers. Stack arguments will happen in a loop after this. - */ - arg_in_reg_bitvector = (char *)alloca((sig->param_count + 7) / 8); - memset(arg_in_reg_bitvector, 0, (sig->param_count + 7) / 8); - - /* First, load all the arguments that are passed in registers into the appropriate registers. - * Below there is another loop to handle arguments passed on the stack. - */ - for (i = 0; i < sig->param_count; i++) { - arg_pos = ARG_SIZE * i; - - if (sig->params [i]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i]->type; -enum_marshal: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_CLASS: - if (next_int_arg_reg < MAX_INT_ARG_REGS) { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos, 8); - next_int_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_R4: - if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { - amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); - next_float_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_R8: - if (next_float_arg_reg < MAX_FLOAT_ARG_REGS) { - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos); - next_float_arg_reg++; - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - } - break; - case MONO_TYPE_VALUETYPE: { - if (!sig->params [i]->data.klass->enumtype) { - int size; - int arg_type; - int regs_used; - int offset1; - int size1; - int offset2; - int size2; - - arg_type = value_type_info(sig->params [i]->data.klass, &size, ®s_used, &offset1, &size1, &offset2, &size2); - - if (arg_type == ARG_IN_INT_REGS && - (next_int_arg_reg + regs_used) <= MAX_INT_ARG_REGS) - { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset1, size1); - next_int_arg_reg++; - if (regs_used > 1) - { - amd64_mov_reg_membase (p, int_arg_regs[next_int_arg_reg], AMD64_R10, arg_pos + offset2, size2); - next_int_arg_reg++; - } - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - break; - } - - if (arg_type == ARG_IN_FLOAT_REGS && - (next_float_arg_reg + regs_used) <= MAX_FLOAT_ARG_REGS) - { - if (size1 == 4) - amd64_movss_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); - else - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset1); - next_float_arg_reg++; - - if (regs_used > 1) - { - if (size2 == 4) - amd64_movss_reg_membase (p, 
next_float_arg_reg, AMD64_R10, arg_pos + offset2); - else - amd64_movsd_reg_membase (p, next_float_arg_reg, AMD64_R10, arg_pos + offset2); - next_float_arg_reg++; - } - arg_in_reg_bitvector[i >> 3] |= (1 << (i & 7)); - break; - } - - /* Structs in memory are handled in the next loop. */ - } else { - /* it's an enum value */ - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_marshal; - } - break; - } - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - /* Handle stack arguments, pushing the rightmost argument first. */ - for (i = sig->param_count; i > 0; --i) { - arg_pos = ARG_SIZE * (i - 1); - if (sig->params [i - 1]->byref) - simpletype = MONO_TYPE_PTR; - else - simpletype = sig->params [i - 1]->type; -enum_marshal2: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_CLASS: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_R4: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_R8: - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) { - amd64_push_membase (p, AMD64_R10, arg_pos); - } - break; - case MONO_TYPE_VALUETYPE: - if (!sig->params [i - 1]->data.klass->enumtype) { - if ((arg_in_reg_bitvector[(i - 1) >> 3] & (1 << ((i - 1) & 7))) == 0) - { - int ss = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); - ss += 7; - ss &= ~7; - - amd64_alu_reg_imm(p, X86_SUB, AMD64_RSP, ss); - /* Count register */ - amd64_mov_reg_imm(p, AMD64_RCX, ss); - /* Source register */ - amd64_lea_membase(p, AMD64_RSI, AMD64_R10, arg_pos); - /* Dest register */ - amd64_mov_reg_reg(p, AMD64_RDI, AMD64_RSP, 8); - - /* AMD64 calling convention guarantees direction flag is clear at call boundary. */ - x86_prefix(p, AMD64_REX(AMD64_REX_W)); - x86_prefix(p, X86_REP_PREFIX); - x86_movsb(p); - } - } else { - /* it's an enum value */ - simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; - goto enum_marshal2; - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); - } - } - - /* TODO: Set RAL to number of XMM registers used in case this is a varags function? */ - - /* - * Insert call to function - */ - amd64_call_reg (p, AMD64_R11); - - if (sig->ret->byref || string_ctor || !(retval_implicit || sig->ret->type == MONO_TYPE_VOID)) { - amd64_mov_reg_membase(p, AMD64_RSI, AMD64_RBP, -8, SIZEOF_VOID_P); - } - /* - * Handle retval. - * Small integer and pointer values are in EAX. - * Long integers are in EAX:EDX. - * FP values are on the FP stack. 
- */
-
- if (sig->ret->byref || string_ctor) {
- simpletype = MONO_TYPE_PTR;
- } else {
- simpletype = sig->ret->type;
- }
- enum_retvalue:
- switch (simpletype) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 1);
- break;
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 2);
- break;
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8);
- break;
- case MONO_TYPE_R4:
- amd64_movss_regp_reg (p, AMD64_RSI, AMD64_XMM0);
- break;
- case MONO_TYPE_R8:
- amd64_movsd_regp_reg (p, AMD64_RSI, AMD64_XMM0);
- break;
- case MONO_TYPE_I8:
- amd64_mov_regp_reg (p, AMD64_RSI, X86_EAX, 8);
- break;
- case MONO_TYPE_VALUETYPE: {
- int size;
- int arg_type;
- int regs_used;
- int offset1;
- int size1;
- int offset2;
- int size2;
-
- if (sig->ret->data.klass->enumtype) {
- simpletype = sig->ret->data.klass->enum_basetype->type;
- goto enum_retvalue;
- }
-
- arg_type = value_type_info(sig->ret->data.klass, &size, &regs_used, &offset1, &size1, &offset2, &size2);
-
- if (arg_type == ARG_IN_INT_REGS)
- {
- amd64_mov_membase_reg (p, AMD64_RSI, offset1, AMD64_RAX, size1);
- if (regs_used > 1)
- amd64_mov_membase_reg (p, AMD64_RSI, offset2, AMD64_RDX, size2);
- break;
- }
-
- if (arg_type == ARG_IN_FLOAT_REGS)
- {
- if (size1 == 4)
- amd64_movss_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0);
- else
- amd64_movsd_membase_reg (p, AMD64_RSI, offset1, AMD64_XMM0);
-
- if (regs_used > 1)
- {
- if (size2 == 4)
- amd64_movss_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1);
- else
- amd64_movsd_membase_reg (p, AMD64_RSI, offset2, AMD64_XMM1);
- }
- break;
- }
-
- /* Else result should have been stored in place already. */
- break;
- }
- case MONO_TYPE_VOID:
- break;
- default:
- g_error ("Can't handle as return value 0x%x", sig->ret->type);
- }
-
- /*
- * Standard epilog.
- */
- amd64_leave (p);
- amd64_ret (p);
-
- g_assert (p - code_buffer < code_size);
- res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer);
-
- g_hash_table_insert (cache, sig, res);
-
- return res;
-}
-
-/*
- * Returns a pointer to a native function that can be used to
- * call the specified method.
- * The function created will receive the arguments according
- * to the call convention specified in the method.
- * This function works by creating a MonoInvocation structure,
- * filling the fields in and calling ves_exec_method on it.
- * Still need to figure out how to handle the exception stuff
- * across the managed/unmanaged boundary.
- */
-void *
-mono_arch_create_method_pointer (MonoMethod *method)
-{
- MonoMethodSignature *sig;
- MonoJitInfo *ji;
- unsigned char *p, *code_buffer;
- guint32 simpletype;
- gint32 local_size;
- gint32 stackval_pos;
- gint32 mono_invocation_pos;
- int i, cpos;
- int *vtbuf;
- int *rbpoffsets;
- int int_arg_regs_used = 0;
- int float_arg_regs_used = 0;
- int stacked_args_size = 0; /* bytes of register passed arguments pushed on stack for safe keeping. Used to get alignment right. */
- int next_stack_arg_rbp_offset = 16;
- int retval_ptr_rbp_offset = 0;
- int this_reg = -1; /* Remember register this ptr is in. */
-
- /*
- * If it is a static P/Invoke method, we can just return the pointer
- * to the method implementation.
- */
- if (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL && ((MonoMethodPInvoke*) method)->addr) {
- ji = g_new0 (MonoJitInfo, 1);
- ji->method = method;
- ji->code_size = 1;
- ji->code_start = ((MonoMethodPInvoke*) method)->addr;
-
- mono_jit_info_table_add (mono_get_root_domain (), ji);
- return ((MonoMethodPInvoke*) method)->addr;
- }
-
- sig = method->signature;
-
- code_buffer = p = alloca (512); /* FIXME: check for overflows... */
- vtbuf = alloca (sizeof(int)*sig->param_count);
- rbpoffsets = alloca (sizeof(int)*sig->param_count);
-
-
- /*
- * Standard function prolog.
- */
- amd64_push_reg (p, AMD64_RBP);
- amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8);
-
- /* If there is an implicit return value pointer in the first args reg, save it now so
- * the result can be stored through the pointer at the end.
- */
- if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype)
- {
- amd64_push_reg (p, int_arg_regs[int_arg_regs_used]);
- int_arg_regs_used++;
- stacked_args_size += 8;
- retval_ptr_rbp_offset = -stacked_args_size;
- }
-
- /*
- * If there is a this pointer, remember the number of the register it is in.
- */
- if (sig->hasthis) {
- this_reg = int_arg_regs[int_arg_regs_used++];
- }
-
- /* Put all arguments passed in registers on the stack.
- * Record offsets from RBP to each argument.
- */
- cpos = 0;
-
- for (i = 0; i < sig->param_count; i++) {
- if (sig->params [i]->byref)
- simpletype = MONO_TYPE_PTR;
- else
- simpletype = sig->params [i]->type;
-enum_calc_size:
- switch (simpletype) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_I8:
- if (int_arg_regs_used < MAX_INT_ARG_REGS) {
- amd64_push_reg (p, int_arg_regs[int_arg_regs_used]);
- int_arg_regs_used++;
- stacked_args_size += 8;
- rbpoffsets[i] = -stacked_args_size;
- }
- else
- {
- rbpoffsets[i] = next_stack_arg_rbp_offset;
- next_stack_arg_rbp_offset += 8;
- }
- break;
- case MONO_TYPE_VALUETYPE: {
- if (sig->params [i]->data.klass->enumtype) {
- simpletype = sig->params [i]->data.klass->enum_basetype->type;
- goto enum_calc_size;
- }
- else
- {
- int size;
- int arg_type;
- int regs_used;
- int offset1;
- int size1;
- int offset2;
- int size2;
-
- arg_type = value_type_info(sig->params [i]->data.klass, &size, &regs_used, &offset1, &size1, &offset2, &size2);
-
- if (arg_type == ARG_IN_INT_REGS &&
- (int_arg_regs_used + regs_used) <= MAX_INT_ARG_REGS)
- {
- amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size);
- stacked_args_size += size;
- rbpoffsets[i] = stacked_args_size;
-
- amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset1, size1);
- int_arg_regs_used++;
- if (regs_used > 1)
- {
- amd64_mov_reg_membase (p, int_arg_regs[int_arg_regs_used], AMD64_RSP, offset2, size2);
- int_arg_regs_used++;
- }
- break;
- }
-
- if (arg_type == ARG_IN_FLOAT_REGS &&
- (float_arg_regs_used + regs_used) <= MAX_FLOAT_ARG_REGS)
- {
- amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, size);
- stacked_args_size += size;
- rbpoffsets[i] = stacked_args_size;
-
- if (size1 == 4)
- amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1);
- else
- amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset1);
- float_arg_regs_used++;
-
- if (regs_used > 1)
- {
- if (size2 == 4)
- amd64_movss_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); - else - amd64_movsd_reg_membase (p, float_arg_regs_used, AMD64_RSP, offset2); - float_arg_regs_used++; - } - break; - } - - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += size; - } - break; - } - case MONO_TYPE_R4: - if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); - amd64_movss_regp_reg (p, AMD64_RSP, float_arg_regs_used); - float_arg_regs_used++; - stacked_args_size += 8; - rbpoffsets[i] = -stacked_args_size; - } - else - { - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += 8; - } - break; - case MONO_TYPE_R8: - stacked_args_size += 8; - if (float_arg_regs_used < MAX_FLOAT_ARG_REGS) { - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 8); - amd64_movsd_regp_reg (p, AMD64_RSP, float_arg_regs_used); - float_arg_regs_used++; - stacked_args_size += 8; - rbpoffsets[i] = -stacked_args_size; - } - else - { - rbpoffsets[i] = next_stack_arg_rbp_offset; - next_stack_arg_rbp_offset += 8; - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1) + stacked_args_size; - - local_size += 15; - local_size &= ~15; - - stackval_pos = -local_size; - mono_invocation_pos = stackval_pos + sizeof (stackval) * (sig->param_count + 1); - - /* stacked_args_size has already been pushed onto the stack. Make room for the rest of it. */ - amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, local_size - stacked_args_size); - - /* Be careful not to trash any arg regs before saving this_reg to MonoInvocation structure below. */ - - /* - * Initialize MonoInvocation fields, first the ones known now. - */ - amd64_alu_reg_reg (p, X86_XOR, AMD64_RAX, AMD64_RAX); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex)), AMD64_RAX, SIZEOF_VOID_P); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), AMD64_RAX, SIZEOF_VOID_P); - amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, parent)), AMD64_RAX, SIZEOF_VOID_P); - /* - * Set the method pointer. - */ - amd64_mov_membase_imm (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, method)), (long)method, SIZEOF_VOID_P); - - /* - * Handle this. - */ - if (sig->hasthis) - amd64_mov_membase_reg(p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, obj)), this_reg, SIZEOF_VOID_P); - - /* - * Handle the arguments. stackval_pos is the offset from RBP of the stackval in the MonoInvocation args array . - * arg_pos is the offset from RBP to the incoming arg on the stack. - * We just call stackval_from_data to handle all the (nasty) issues.... 
- */
- amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos);
- amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, stack_args)), AMD64_RAX, SIZEOF_VOID_P);
- for (i = 0; i < sig->param_count; ++i) {
-/* Need to call stackval_from_data (MonoType *type, stackval *result, char *data, gboolean pinvoke); */
- amd64_mov_reg_imm (p, AMD64_R11, stackval_from_data);
- amd64_mov_reg_imm (p, int_arg_regs[0], sig->params[i]);
- amd64_lea_membase (p, int_arg_regs[1], AMD64_RBP, stackval_pos);
- amd64_lea_membase (p, int_arg_regs[2], AMD64_RBP, rbpoffsets[i]);
- amd64_mov_reg_imm (p, int_arg_regs[3], sig->pinvoke);
- amd64_call_reg (p, AMD64_R11);
- stackval_pos += sizeof (stackval);
-#if 0
- /* fixme: alignment */
- if (sig->pinvoke)
- arg_pos += mono_type_native_stack_size (sig->params [i], &align);
- else
- arg_pos += mono_type_stack_size (sig->params [i], &align);
-#endif
- }
-
- /*
- * Handle the return value storage area.
- */
- amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos);
- amd64_mov_membase_reg (p, AMD64_RBP, (mono_invocation_pos + G_STRUCT_OFFSET (MonoInvocation, retval)), AMD64_RAX, SIZEOF_VOID_P);
- if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) {
- MonoClass *klass = sig->ret->data.klass;
- if (!klass->enumtype) {
- amd64_mov_reg_membase (p, AMD64_RCX, AMD64_RBP, retval_ptr_rbp_offset, SIZEOF_VOID_P);
- amd64_mov_membase_reg (p, AMD64_RBP, stackval_pos, AMD64_RCX, SIZEOF_VOID_P);
- }
- }
-
- /*
- * Call the method.
- */
- amd64_lea_membase (p, int_arg_regs[0], AMD64_RBP, mono_invocation_pos);
- amd64_mov_reg_imm (p, AMD64_R11, ves_exec_method);
- amd64_call_reg (p, AMD64_R11);
-
- /*
- * Move the return value to the proper place.
- */
- amd64_lea_membase (p, AMD64_RAX, AMD64_RBP, stackval_pos);
- if (sig->ret->byref) {
- amd64_mov_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, SIZEOF_VOID_P);
- } else {
- int simpletype = sig->ret->type;
- enum_retvalue:
- switch (sig->ret->type) {
- case MONO_TYPE_VOID:
- break;
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 1);
- break;
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 2);
- break;
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 4);
- break;
- case MONO_TYPE_I8:
- amd64_movzx_reg_membase (p, AMD64_RAX, AMD64_RAX, 0, 8);
- break;
- case MONO_TYPE_R4:
- amd64_movss_regp_reg (p, AMD64_RAX, AMD64_XMM0);
- break;
- case MONO_TYPE_R8:
- amd64_movsd_regp_reg (p, AMD64_RAX, AMD64_XMM0);
- break;
- case MONO_TYPE_VALUETYPE: {
- int size;
- int arg_type;
- int regs_used;
- int offset1;
- int size1;
- int offset2;
- int size2;
-
- if (sig->ret->data.klass->enumtype) {
- simpletype = sig->ret->data.klass->enum_basetype->type;
- goto enum_retvalue;
- }
-
- arg_type = value_type_info(sig->ret->data.klass, &size, &regs_used, &offset1, &size1, &offset2, &size2);
-
- if (arg_type == ARG_IN_INT_REGS)
- {
- if (regs_used > 1)
- amd64_mov_membase_reg (p, AMD64_RAX, offset2, AMD64_RDX, size2);
- amd64_mov_membase_reg (p, AMD64_RAX, offset1, AMD64_RAX, size1);
- break;
- }
-
- if (arg_type == ARG_IN_FLOAT_REGS)
- {
- if (size1 == 4)
- amd64_movss_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0);
- else
- amd64_movsd_membase_reg (p, AMD64_RAX, offset1, AMD64_XMM0);
-
- if (regs_used > 1)
- {
- if (size2 == 4)
- amd64_movss_membase_reg (p, AMD64_RAX, offset2, AMD64_XMM1);
- else
- amd64_movsd_membase_reg (p, AMD64_RAX, offset2, AMD64_XMM1);
- }
- break;
- }
-
- /* Else result should have been stored in place already. IA32 code has a stackval_to_data call here, which
- * looks wrong to me as the pointer in the stack val being converted is set up to point to the output area anyway.
- * It all looks a bit suspect anyway.
- */
- break;
- }
- default:
- g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type);
- break;
- }
- }
-
- /*
- * Standard epilog.
- */
- amd64_leave (p);
- amd64_ret (p);
-
- g_assert (p - code_buffer < 512);
-
- ji = g_new0 (MonoJitInfo, 1);
- ji->method = method;
- ji->code_size = p - code_buffer;
- ji->code_start = g_memdup (code_buffer, p - code_buffer);
-
- mono_jit_info_table_add (mono_get_root_domain (), ji);
-
- return ji->code_start;
-}
diff --git a/ppc/Makefile.am b/ppc/Makefile.am
index 667ad25..9b209ef 100644
--- a/ppc/Makefile.am
+++ b/ppc/Makefile.am
@@ -1,7 +1 @@
-AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
-
-noinst_LTLIBRARIES = libmonoarch-ppc.la
-
-libmonoarch_ppc_la_SOURCES = tramp.c ppc-codegen.h
-
-noinst_PROGRAMS = test
+EXTRA_DIST = ppc-codegen.h
\ No newline at end of file
diff --git a/ppc/test.c b/ppc/test.c
deleted file mode 100644
index c19358d..0000000
--- a/ppc/test.c
+++ /dev/null
@@ -1,35 +0,0 @@
-#include "ppc-codegen.h"
-#include <stdio.h>
-
-/* don't run the resulting program, it will destroy your computer,
- * just objdump -d it to inspect we generated the correct assembler.
- * On Mac OS X use otool[64] -v -t
- */
-
-int main() {
- guint8 code [16000];
- guint8 *p = code;
- guint8 *cp;
-
- printf (".text\n.align 4\n.globl main\n");
-#ifndef __APPLE__
- printf (".type main,@function\n");
-#endif
- printf ("main:\n");
-
- ppc_stwu (p, ppc_r1, -32, ppc_r1);
- ppc_mflr (p, ppc_r0);
- ppc_stw (p, ppc_r31, 28, ppc_r1);
- ppc_or (p, ppc_r1, ppc_r2, ppc_r3);
- ppc_mr (p, ppc_r31, ppc_r1);
- ppc_lwz (p, ppc_r11, 0, ppc_r1);
- ppc_mtlr (p, ppc_r0);
- ppc_blr (p);
- ppc_addi (p, ppc_r6, ppc_r6, 16);
-
- for (cp = code; cp < p; cp++) {
- printf (".byte 0x%x\n", *cp);
- }
-
- return 0;
-}
diff --git a/ppc/tramp.c b/ppc/tramp.c
deleted file mode 100644
index 6bb1896..0000000
--- a/ppc/tramp.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/*
- * Create trampolines to invoke arbitrary functions.
- *
- * Copyright (C) Radek Doulik
- *
- */
-
-#include "config.h"
-#include <stdlib.h>
-#include <string.h>
-#include "ppc-codegen.h"
-#include "mono/metadata/class.h"
-#include "mono/metadata/tabledefs.h"
-#include "mono/interpreter/interp.h"
-#include "mono/metadata/appdomain.h"
-
-#ifdef NEED_MPROTECT
-#include <sys/mman.h>
-#include <limits.h> /* for PAGESIZE */
-#ifndef PAGESIZE
-#define PAGESIZE 4096
-#endif
-#endif
-
-#define DEBUG(x)
-
-/* gpointer
-fake_func (gpointer (*callme)(gpointer), stackval *retval, void *this_obj, stackval *arguments)
-{
- guint32 i = 0xc002becd;
-
- callme = (gpointer) 0x100fabcd;
-
- *(gpointer*)retval = (gpointer)(*callme) (arguments [0].data.p, arguments [1].data.p, arguments [2].data.p);
- *(gdouble*) retval = (gdouble)(*callme) (arguments [0].data.f);
-
- return (gpointer) (*callme) (((MonoType *)arguments [0].
data.p)->data.klass); -} */ - -#define MIN_CACHE_LINE 8 - -static void inline -flush_icache (guint8 *code, guint size) -{ - guint i; - guint8 *p; - - p = code; - for (i = 0; i < size; i += MIN_CACHE_LINE, p += MIN_CACHE_LINE) { - asm ("dcbst 0,%0;" : : "r"(p) : "memory"); - } - asm ("sync"); - p = code; - for (i = 0; i < size; i += MIN_CACHE_LINE, p += MIN_CACHE_LINE) { - asm ("icbi 0,%0; sync;" : : "r"(p) : "memory"); - } - asm ("sync"); - asm ("isync"); -} - -static void -disassemble (guint8 *code, int size) -{ - int i; - FILE *ofd; - const char *tmp = g_getenv("TMP"); - char *as_file; - char *o_file; - char *cmd; - - if (tmp == NULL) - tmp = "/tmp"; - as_file = g_strdup_printf ("%s/test.s", tmp); - - if (!(ofd = fopen (as_file, "w"))) - g_assert_not_reached (); - - fprintf (ofd, "tmp:\n"); - - for (i = 0; i < size; ++i) - fprintf (ofd, ".byte %d\n", (unsigned int) code [i]); - - fclose (ofd); -#ifdef __APPLE__ -#define DIS_CMD "otool -V -v -t" -#else -#define DIS_CMD "objdump -d" -#endif - o_file = g_strdup_printf ("%s/test.o", tmp); - cmd = g_strdup_printf ("as %s -o %s", as_file, o_file); - system (cmd); - g_free (cmd); - cmd = g_strdup_printf (DIS_CMD " %s", o_file); - system (cmd); - g_free (cmd); - g_free (o_file); - g_free (as_file); -} - - -#define NOT_IMPLEMENTED(x) \ - g_error ("FIXME: %s is not yet implemented. (trampoline)", x); - -#define PROLOG_INS 8 -#define CALL_INS 2 -#define EPILOG_INS 6 -#define FLOAT_REGS 8 -#define GENERAL_REGS 8 -#ifdef __APPLE__ -#define MINIMAL_STACK_SIZE 10 -#define ALWAYS_ON_STACK(s) s -#define FP_ALSO_IN_REG(s) s -#define RET_ADDR_OFFSET 8 -#define STACK_PARAM_OFFSET 24 -#else -#define MINIMAL_STACK_SIZE 5 -#define ALWAYS_ON_STACK(s) -#define FP_ALSO_IN_REG(s) s -#define ALIGN_DOUBLES -#define RET_ADDR_OFFSET 4 -#define STACK_PARAM_OFFSET 8 -#endif - -static void inline -add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple) -{ - if (simple) { - if (*gr >= GENERAL_REGS) { - *stack_size += 4; - *code_size += 8; /* load from stack, save on stack */ - } else { - ALWAYS_ON_STACK (*stack_size += 4); - *code_size += 4; /* load from stack */ - } - } else { - if (*gr >= GENERAL_REGS - 1) { - *stack_size += 8; -#ifdef ALIGN_DOUBLES - *stack_size += (*stack_size % 8); -#endif - *code_size += 16; /* 2x load from stack, 2x save to stack */ - } else { - ALWAYS_ON_STACK (*stack_size += 8); - *code_size += 8; /* 2x load from stack */ - } -#ifdef ALIGN_DOUBLES - if ((*gr) & 1) - (*gr) ++; -#endif - (*gr) ++; - } - (*gr) ++; -} - -static void inline -calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size, gboolean string_ctor, gboolean *use_memcpy) -{ - guint i, fr, gr; - guint32 simpletype; - - fr = gr = 0; - *stack_size = MINIMAL_STACK_SIZE*4; - *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS)*4; - - if (sig->hasthis) { - add_general (&gr, stack_size, code_size, TRUE); - } - DEBUG(printf("params: %d\n", sig->param_count)); - for (i = 0; i < sig->param_count; ++i) { - DEBUG(printf("param %d: ", i)); - if (sig->params [i]->byref) { - DEBUG(printf("byref\n")); - add_general (&gr, stack_size, code_size, TRUE); - continue; - } - simpletype = sig->params [i]->type; - enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_CHAR: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - 
add_general (&gr, stack_size, code_size, TRUE); - break; - case MONO_TYPE_SZARRAY: - add_general (&gr, stack_size, code_size, TRUE); - *code_size += 4; - break; - case MONO_TYPE_VALUETYPE: { - gint size; - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - size = mono_class_value_size (sig->params [i]->data.klass, NULL); - if (size != 4) { - DEBUG(printf ("copy %d bytes struct on stack\n", - mono_class_value_size (sig->params [i]->data.klass, NULL))); - *use_memcpy = TRUE; - *code_size += 8*4; - *stack_size += (size + 3) & (~3); - if (gr > GENERAL_REGS) { - *code_size += 4; - *stack_size += 4; - } - } else { - DEBUG(printf ("load %d bytes struct\n", - mono_class_value_size (sig->params [i]->data.klass, NULL))); - add_general (&gr, stack_size, code_size, TRUE); - *code_size += 4; - } - break; - } - case MONO_TYPE_I8: - add_general (&gr, stack_size, code_size, FALSE); - break; - case MONO_TYPE_R4: - if (fr < 7) { - *code_size += 4; - fr ++; - FP_ALSO_IN_REG (gr ++); - ALWAYS_ON_STACK (*stack_size += 4); - } else { - NOT_IMPLEMENTED ("R4 arg"); - } - break; - case MONO_TYPE_R8: - if (fr < 7) { - *code_size += 4; - fr ++; - FP_ALSO_IN_REG (gr += 2); - ALWAYS_ON_STACK (*stack_size += 8); - } else { - NOT_IMPLEMENTED ("R8 arg"); - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - if (sig->ret->byref || string_ctor) { - *code_size += 8; - } else { - simpletype = sig->ret->type; -enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_R4: - case MONO_TYPE_R8: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - *code_size += 8; - break; - case MONO_TYPE_I8: - *code_size += 12; - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - *code_size += 2*4; - break; - case MONO_TYPE_VOID: - break; - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); - } - } - - if (*use_memcpy) { - *stack_size += 2*4; /* for r14, r15 */ - *code_size += 6*4; - if (sig->hasthis) { - *stack_size += 4; /* for r16 */ - *code_size += 4; - } - } - - /* align stack size to 16 */ - DEBUG (printf (" stack size: %d (%d)\n code size: %d\n", (*stack_size + 15) & ~15, *stack_size, *code_size)); - *stack_size = (*stack_size + 15) & ~15; -} - -static inline guint8 * -emit_prolog (guint8 *p, MonoMethodSignature *sig, guint stack_size) -{ - /* function prolog */ - ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ - ppc_mflr (p, ppc_r0); /* r0 <--- LR */ - ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ - ppc_stw (p, ppc_r0, stack_size + RET_ADDR_OFFSET, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ - ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ - - return p; -} - -#define ARG_BASE ppc_r12 -#define ARG_SIZE sizeof (stackval) -#define SAVE_4_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); \ - gr ++; \ - ALWAYS_ON_STACK (stack_par_pos += 4); \ - } else { \ - ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); \ - ppc_stw (p, ppc_r11, stack_par_pos, 
ppc_r1); \ - stack_par_pos += 4; \ - } -#define SAVE_4_VAL_IN_GENERIC_REGISTER \ - if (gr < GENERAL_REGS) { \ - ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); \ - ppc_lwz (p, ppc_r3 + gr, 0, ppc_r3 + gr); \ - gr ++; \ - ALWAYS_ON_STACK (stack_par_pos += 4); \ - } else { \ - ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); \ - ppc_lwz (p, ppc_r11, 0, ppc_r11); \ - ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); \ - stack_par_pos += 4; \ - } - -inline static guint8* -emit_save_parameters (guint8 *p, MonoMethodSignature *sig, guint stack_size, gboolean use_memcpy) -{ - guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos; - guint32 simpletype; - - fr = gr = 0; - stack_par_pos = STACK_PARAM_OFFSET; - - ppc_stw (p, ppc_r4, stack_size - 12, ppc_r31); /* preserve "retval", sp[+8] */ - - if (use_memcpy) { - ppc_stw (p, ppc_r14, stack_size - 16, ppc_r31); /* save r14 */ - ppc_stw (p, ppc_r15, stack_size - 20, ppc_r31); /* save r15 */ - ppc_mr (p, ppc_r14, ppc_r3); /* keep "callme" in register */ - ppc_mr (p, ppc_r15, ppc_r6); /* keep "arguments" in register */ - } else { - ppc_mr (p, ppc_r12, ppc_r6); /* keep "arguments" in register */ - ppc_mr (p, ppc_r0, ppc_r3); /* keep "callme" in register */ - } - - if (sig->hasthis) { - if (use_memcpy) { - ppc_stw (p, ppc_r16, stack_size - 24, ppc_r31); /* save r16 */ - ppc_mr (p, ppc_r16, ppc_r5); - } else - ppc_mr (p, ppc_r3, ppc_r5); - gr ++; - ALWAYS_ON_STACK (stack_par_pos += 4); - } - - if (use_memcpy) { - cur_struct_pos = struct_pos = stack_par_pos; - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) - continue; - if (sig->params [i]->type == MONO_TYPE_VALUETYPE && !sig->params [i]->data.klass->enumtype) { - gint size; - - size = mono_class_value_size (sig->params [i]->data.klass, NULL); - if (size != 4) { - /* call memcpy */ - ppc_addi (p, ppc_r3, ppc_r1, stack_par_pos); - ppc_lwz (p, ppc_r4, i*16, ppc_r15); - /* FIXME check if size > 0xffff */ - ppc_li (p, ppc_r5, size & 0xffff); - ppc_lis (p, ppc_r0, (guint32) memcpy >> 16); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) memcpy & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - stack_par_pos += (size + 3) & (~3); - } - } - } - - if (sig->hasthis) { - ppc_mr (p, ppc_r3, ppc_r16); - ppc_lwz (p, ppc_r16, stack_size - 24, ppc_r31); /* restore r16 */ - } - ppc_mr (p, ppc_r0, ppc_r14); - ppc_mr (p, ppc_r12, ppc_r15); - ppc_lwz (p, ppc_r14, stack_size - 16, ppc_r31); /* restore r14 */ - ppc_lwz (p, ppc_r15, stack_size - 20, ppc_r31); /* restore r15 */ - } - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - gint size = mono_class_native_size (klass, NULL); - - DEBUG(printf ("retval value type size: %d\n", size)); - if (size > 8) { - ppc_lwz (p, ppc_r3, stack_size - 12, ppc_r31); - ppc_lwz (p, ppc_r3, 0, ppc_r3); - gr ++; - ALWAYS_ON_STACK (stack_par_pos += 4); - } else { - NOT_IMPLEMENTED ("retval valuetype <= 8 bytes"); - } - } - } - - for (i = 0; i < sig->param_count; ++i) { - if (sig->params [i]->byref) { - SAVE_4_IN_GENERIC_REGISTER; - continue; - } - simpletype = sig->params [i]->type; - enum_calc_size: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_SZARRAY: - SAVE_4_IN_GENERIC_REGISTER; - break; - 
case MONO_TYPE_VALUETYPE: { - gint size; - if (sig->params [i]->data.klass->enumtype) { - simpletype = sig->params [i]->data.klass->enum_basetype->type; - goto enum_calc_size; - } - size = mono_class_value_size (sig->params [i]->data.klass, NULL); - if (size == 4) { - SAVE_4_VAL_IN_GENERIC_REGISTER; - } else { - if (gr < GENERAL_REGS) { - ppc_addi (p, ppc_r3 + gr, ppc_r1, cur_struct_pos); - gr ++; - } else { - ppc_lwz (p, ppc_r11, cur_struct_pos, ppc_r1); - ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); - stack_par_pos += 4; - } - cur_struct_pos += (size + 3) & (~3); - } - break; - } - case MONO_TYPE_I8: -DEBUG(printf("Mono_Type_i8. gr = %d, arg_base = %d\n", gr, ARG_BASE)); -#ifdef ALIGN_DOUBLES - if (gr & 1) - gr++; -#endif - if (gr < 7) { - ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); - ppc_lwz (p, ppc_r3 + gr + 1, i*ARG_SIZE + 4, ARG_BASE); - ALWAYS_ON_STACK (stack_par_pos += 8); - } else if (gr == 7) { - ppc_lwz (p, ppc_r3 + gr, i*ARG_SIZE, ARG_BASE); - ppc_lwz (p, ppc_r11, i*ARG_SIZE + 4, ARG_BASE); - ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); - stack_par_pos += 8; - } else { - ppc_lwz (p, ppc_r11, i*ARG_SIZE, ARG_BASE); - ppc_stw (p, ppc_r11, stack_par_pos, ppc_r1); - ppc_lwz (p, ppc_r11, i*ARG_SIZE + 4, ARG_BASE); - ppc_stw (p, ppc_r11, stack_par_pos + 4, ppc_r1); - stack_par_pos += 8; - } - gr += 2; - break; - case MONO_TYPE_R4: - if (fr < 7) { - ppc_lfs (p, ppc_f1 + fr, i*ARG_SIZE, ARG_BASE); - fr ++; - FP_ALSO_IN_REG (gr ++); - ALWAYS_ON_STACK (stack_par_pos += 4); - } else { - NOT_IMPLEMENTED ("r4 on stack"); - } - break; - case MONO_TYPE_R8: - if (fr < 7) { - ppc_lfd (p, ppc_f1 + fr, i*ARG_SIZE, ARG_BASE); - fr ++; - FP_ALSO_IN_REG (gr += 2); - ALWAYS_ON_STACK (stack_par_pos += 8); - } else { - NOT_IMPLEMENTED ("r8 on stack"); - } - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i]->type); - } - } - - return p; -} - -static inline guint8 * -alloc_code_memory (guint code_size) -{ - guint8 *p; - -#ifdef NEED_MPROTECT - p = g_malloc (code_size + PAGESIZE - 1); - - /* Align to a multiple of PAGESIZE, assumed to be a power of two */ - p = (char *)(((int) p + PAGESIZE-1) & ~(PAGESIZE-1)); -#else - p = g_malloc (code_size); -#endif - DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4)); - - return p; -} - -/* static MonoString* -mono_string_new_wrapper (const char *text) -{ - return text ? 
mono_string_new (mono_domain_get (), text) : NULL; -} */ - -static inline guint8 * -emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig, guint stack_size, gboolean string_ctor) -{ - guint32 simpletype; - - /* call "callme" */ - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - - /* get return value */ - if (sig->ret->byref || string_ctor) { - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - } else { - simpletype = sig->ret->type; -enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stb (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_sth (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - break; - case MONO_TYPE_R4: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stfs (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ - break; - case MONO_TYPE_R8: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stfd (p, ppc_f1, 0, ppc_r9); /* save return value (f1) to "retval" */ - break; - case MONO_TYPE_I8: - ppc_lwz (p, ppc_r9, stack_size - 12, ppc_r31); /* load "retval" address */ - ppc_stw (p, ppc_r3, 0, ppc_r9); /* save return value (r3) to "retval" */ - ppc_stw (p, ppc_r4, 4, ppc_r9); /* save return value (r3) to "retval" */ - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - break; - case MONO_TYPE_VOID: - break; - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); - } - } - - return p; -} - -static inline guint8 * -emit_epilog (guint8 *p, MonoMethodSignature *sig, guint stack_size) -{ - /* function epilog */ - ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */ - ppc_lwz (p, ppc_r0, RET_ADDR_OFFSET, ppc_r11); /* r0 <--- r11[4] load return address */ - ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */ - ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */ - ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */ - ppc_blr (p); /* return */ - - return p; -} - -MonoPIFunc -mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -{ - guint8 *p, *code_buffer; - guint stack_size, code_size; - gboolean use_memcpy = FALSE; - - DEBUG (printf ("\nPInvoke [start emiting]\n")); - calculate_sizes (sig, &stack_size, &code_size, string_ctor, &use_memcpy); - - p = code_buffer = alloc_code_memory (code_size); - p = emit_prolog (p, sig, stack_size); - p = emit_save_parameters (p, sig, stack_size, use_memcpy); - p = emit_call_and_store_retval (p, sig, stack_size, string_ctor); - p = emit_epilog (p, sig, stack_size); - - /* { - guchar *cp; - printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n"); - for (cp = 
code_buffer; cp < p; cp++) { - printf (".byte 0x%x\n", *cp); - } - } */ - -#ifdef NEED_MPROTECT - if (mprotect (code_buffer, 1024, PROT_READ | PROT_WRITE | PROT_EXEC)) { - g_error ("Cannot mprotect trampoline\n"); - } -#endif - - DEBUG (printf ("emited code size: %d\n", p - code_buffer)); - flush_icache (code_buffer, p - code_buffer); - - DEBUG (printf ("PInvoke [end emiting]\n")); - - return (MonoPIFunc) code_buffer; - /* return fake_func; */ -} - - -#ifdef __APPLE__ -#define MINV_POS 40 /* MonoInvocation structure offset on stack - STACK_PARAM_OFFSET + 4 pointer args for stackval_from_data */ -#else -#define MINV_POS 8 /* MonoInvocation structure offset on stack */ -#endif -#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) -#define OBJ_POS 8 -#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) - -/* - * Returns a pointer to a native function that can be used to - * call the specified method. - * The function created will receive the arguments according - * to the call convention specified in the method. - * This function works by creating a MonoInvocation structure, - * filling the fields in and calling ves_exec_method on it. - * Still need to figure out how to handle the exception stuff - * across the managed/unmanaged boundary. - */ -void * -mono_arch_create_method_pointer (MonoMethod *method) -{ - MonoMethodSignature *sig; - MonoJitInfo *ji; - guint8 *p, *code_buffer; - guint i, align = 0, code_size, stack_size, stackval_arg_pos, local_pos, local_start, reg_param = 0, stack_param, - cpos, vt_cur; - gint *vtbuf; - guint32 simpletype; - - code_size = 1024; - stack_size = 1024; - stack_param = 0; - - sig = mono_method_signature (method); - - p = code_buffer = g_malloc (code_size); - - DEBUG (printf ("\nDelegate [start emiting] %s\n", mono_method_get_name (method))); - - /* prolog */ - ppc_stwu (p, ppc_r1, -stack_size, ppc_r1); /* sp <--- sp - stack_size, sp[0] <---- sp save sp, alloc stack */ - ppc_mflr (p, ppc_r0); /* r0 <--- LR */ - ppc_stw (p, ppc_r31, stack_size - 4, ppc_r1); /* sp[+4] <--- r31 save r31 */ - ppc_stw (p, ppc_r0, stack_size + RET_ADDR_OFFSET, ppc_r1); /* sp[-4] <--- LR save return address for "callme" */ - ppc_mr (p, ppc_r31, ppc_r1); /* r31 <--- sp */ - - /* let's fill MonoInvocation */ - /* first zero some fields */ - ppc_li (p, ppc_r0, 0); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), ppc_r31); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), ppc_r31); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), ppc_r31); - - /* set method pointer */ - ppc_lis (p, ppc_r0, (guint32) method >> 16); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) method & 0xffff); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), ppc_r31); - - local_start = local_pos = MINV_POS + sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval); - - if (sig->hasthis) { - ppc_stw (p, ppc_r3, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), ppc_r31); - reg_param = 1; - } - - if (sig->param_count) { - gint save_count = MIN (8, sig->param_count + sig->hasthis); - for (i = reg_param; i < save_count; i ++) { - ppc_stw (p, ppc_r3 + i, local_pos, ppc_r31); - local_pos += 4; - DEBUG (printf ("save r%d\n", 4 + i)); - } - } - - /* prepare space for valuetypes */ - vt_cur = local_pos; - vtbuf = alloca (sizeof(int)*sig->param_count); - cpos = 0; - for (i = 0; i < sig->param_count; i++) { - MonoType *type = sig->params [i]; - vtbuf [i] = -1; - if (type->type == 
MONO_TYPE_VALUETYPE) { - MonoClass *klass = type->data.klass; - gint size; - - if (klass->enumtype) - continue; - size = mono_class_native_size (klass, &align); - cpos += align - 1; - cpos &= ~(align - 1); - vtbuf [i] = cpos; - cpos += size; - } - } - cpos += 3; - cpos &= ~3; - - local_pos += cpos; - - /* set MonoInvocation::stack_args */ - stackval_arg_pos = MINV_POS + sizeof (MonoInvocation); - ppc_addi (p, ppc_r0, ppc_r31, stackval_arg_pos); - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)), ppc_r31); - - /* add stackval arguments */ - for (i = 0; i < sig->param_count; ++i) { - if (reg_param < 8) { - ppc_addi (p, ppc_r5, ppc_r31, local_start + i*4); - reg_param ++; - } else { - ppc_addi (p, ppc_r5, stack_size + 8 + stack_param, ppc_r31); - stack_param ++; - } - ppc_lis (p, ppc_r3, (guint32) sig->params [i] >> 16); - - if (vtbuf [i] >= 0) { - ppc_addi (p, ppc_r4, ppc_r31, vt_cur); - ppc_stw (p, ppc_r4, stackval_arg_pos, ppc_r31); - ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); - ppc_lwz (p, ppc_r5, 0, ppc_r5); - vt_cur += vtbuf [i]; - } else { - ppc_addi (p, ppc_r4, ppc_r31, stackval_arg_pos); - } - ppc_ori (p, ppc_r3, ppc_r3, (guint32) sig->params [i] & 0xffff); - ppc_lis (p, ppc_r0, (guint32) stackval_from_data >> 16); - ppc_li (p, ppc_r6, sig->pinvoke); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) stackval_from_data & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - - stackval_arg_pos += sizeof (stackval); - } - - /* return value storage */ - if (sig->param_count) { - ppc_addi (p, ppc_r0, ppc_r31, stackval_arg_pos); - } - ppc_stw (p, ppc_r0, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)), ppc_r31); - - /* call ves_exec_method */ - ppc_lis (p, ppc_r0, (guint32) ves_exec_method >> 16); - ppc_addi (p, ppc_r3, ppc_r31, MINV_POS); - ppc_ori (p, ppc_r0, ppc_r0, (guint32) ves_exec_method & 0xffff); - ppc_mtlr (p, ppc_r0); - ppc_blrl (p); - - /* move retval from stackval to proper place (r3/r4/...) 
*/
- if (sig->ret->byref) {
- DEBUG (printf ("ret by ref\n"));
- ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31);
- } else {
- enum_retvalue:
- switch (sig->ret->type) {
- case MONO_TYPE_VOID:
- break;
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- ppc_lbz (p, ppc_r3, stackval_arg_pos, ppc_r31);
- break;
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- ppc_lhz (p, ppc_r3, stackval_arg_pos, ppc_r31);
- break;
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31);
- break;
- case MONO_TYPE_I8:
- ppc_lwz (p, ppc_r3, stackval_arg_pos, ppc_r31);
- ppc_lwz (p, ppc_r4, stackval_arg_pos + 4, ppc_r31);
- break;
- case MONO_TYPE_R4:
- ppc_lfs (p, ppc_f1, stackval_arg_pos, ppc_r31);
- break;
- case MONO_TYPE_R8:
- ppc_lfd (p, ppc_f1, stackval_arg_pos, ppc_r31);
- break;
- case MONO_TYPE_VALUETYPE:
- if (sig->ret->data.klass->enumtype) {
- simpletype = sig->ret->data.klass->enum_basetype->type;
- goto enum_retvalue;
- }
- NOT_IMPLEMENTED ("value type as ret val from delegate");
- break;
- default:
- g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type);
- break;
- }
- }
-
- /* epilog */
- ppc_lwz (p, ppc_r11, 0, ppc_r1); /* r11 <--- sp[0] load backchain from caller's function */
- ppc_lwz (p, ppc_r0, RET_ADDR_OFFSET, ppc_r11); /* r0 <--- r11[4] load return address */
- ppc_mtlr (p, ppc_r0); /* LR <--- r0 set return address */
- ppc_lwz (p, ppc_r31, -4, ppc_r11); /* r31 <--- r11[-4] restore r31 */
- ppc_mr (p, ppc_r1, ppc_r11); /* sp <--- r11 restore stack */
- ppc_blr (p); /* return */
-
- DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
- DEBUG (disassemble (code_buffer, p - code_buffer));
- flush_icache (code_buffer, p - code_buffer);
-
- DEBUG (printf ("Delegate [end emitting]\n"));
-
- ji = g_new0 (MonoJitInfo, 1);
- ji->method = method;
- ji->code_size = p - code_buffer;
- ji->code_start = code_buffer;
-
- mono_jit_info_table_add (mono_get_root_domain (), ji);
-
- return ji->code_start;
-}
diff --git a/x86/Makefile.am b/x86/Makefile.am
index e88506e..bab0f9e 100644
--- a/x86/Makefile.am
+++ b/x86/Makefile.am
@@ -1,5 +1 @@
-AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
-
-noinst_LTLIBRARIES = libmonoarch-x86.la
-
-libmonoarch_x86_la_SOURCES = tramp.c x86-codegen.h
+EXTRA_DIST = x86-codegen.h
\ No newline at end of file
diff --git a/x86/test.c b/x86/test.c
deleted file mode 100644
index 3511e8f..0000000
--- a/x86/test.c
+++ /dev/null
@@ -1,225 +0,0 @@
-#include "x86-codegen.h"
-#include <stdio.h>
-
-/* don't run the resulting program, it will destroy your computer,
- * just objdump -d it to inspect we generated the correct assembler.
- */
-
-int main() {
- unsigned char code [16000];
- unsigned char *p = code;
- unsigned char *target, *start, *end;
- unsigned long mem_addr = 0xdeadbeef;
- int size, i;
-
- printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n");
-
- x86_prolog (p, 16, X86_CALLER_REGS);
-
- x86_cmpxchg_reg_reg (p, X86_EAX, X86_EBP);
- x86_cmpxchg_membase_reg (p, X86_EAX, 12, X86_EBP);
-
- x86_xchg_reg_reg (p, X86_EAX, X86_EBP, 4);
- x86_xchg_reg_reg (p, X86_EAX, X86_EBP, 1); // FIXME?
- x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBP, 4);
- x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBP, 2);
- x86_xchg_membase_reg (p, X86_EAX, 12, X86_EBX, 1); // FIXME?
- - x86_inc_reg (p, X86_EAX); - x86_inc_mem (p, mem_addr); - x86_inc_membase (p, X86_ESP, 4); - - x86_nop (p); - x86_nop (p); - - x86_dec_reg (p, X86_EAX); - x86_dec_reg (p, X86_ECX); - x86_dec_mem (p, mem_addr); - x86_dec_membase (p, X86_ESP, 4); - - x86_not_reg (p, X86_EDX); - x86_not_reg (p, X86_ECX); - x86_not_mem (p, mem_addr); - x86_not_membase (p, X86_ESP, 4); - x86_not_membase (p, X86_ESP, 0x4444444); - x86_not_membase (p, X86_EBP, 0x4444444); - x86_not_membase (p, X86_ECX, 0x4444444); - x86_not_membase (p, X86_EDX, 0); - x86_not_membase (p, X86_EBP, 0); - - x86_neg_reg (p, X86_EAX); - x86_neg_reg (p, X86_ECX); - x86_neg_mem (p, mem_addr); - x86_neg_membase (p, X86_ESP, 8); - - x86_alu_reg_imm (p, X86_ADD, X86_EAX, 5); - x86_alu_reg_imm (p, X86_ADD, X86_EBX, -10); - x86_alu_reg_imm (p, X86_SUB, X86_EDX, 7); - x86_alu_reg_imm (p, X86_OR, X86_ESP, 0xffffedaf); - x86_alu_reg_imm (p, X86_CMP, X86_ECX, 1); - x86_alu_mem_imm (p, X86_ADC, mem_addr, 2); - x86_alu_membase_imm (p, X86_ADC, X86_ESP, -4, 4); - x86_alu_membase_imm (p, X86_ADC, X86_ESP, -12, 0xffffedaf); - - x86_alu_mem_reg (p, X86_SUB, mem_addr, X86_EDX); - x86_alu_reg_reg (p, X86_ADD, X86_EAX, X86_EBX); - x86_alu_reg_mem (p, X86_ADD, X86_EAX, mem_addr); - x86_alu_reg_imm (p, X86_ADD, X86_EAX, 0xdeadbeef); - x86_alu_reg_membase (p, X86_XOR, X86_EDX, X86_ESP, 4); - x86_alu_membase_reg (p, X86_XOR, X86_EBP, 8, X86_ESI); - - x86_test_reg_imm (p, X86_EAX, 16); - x86_test_reg_imm (p, X86_EDX, -16); - x86_test_mem_imm (p, mem_addr, 1); - x86_test_membase_imm (p, X86_EBP, 8, 1); - - x86_test_reg_reg (p, X86_EAX, X86_EDX); - x86_test_mem_reg (p, mem_addr, X86_EDX); - x86_test_membase_reg (p, X86_ESI, 4, X86_EDX); - - x86_shift_reg_imm (p, X86_SHL, X86_EAX, 1); - x86_shift_reg_imm (p, X86_SHL, X86_EDX, 2); - - x86_shift_mem_imm (p, X86_SHL, mem_addr, 2); - x86_shift_membase_imm (p, X86_SHLR, X86_EBP, 8, 4); - - /* - * Shift by CL - */ - x86_shift_reg (p, X86_SHL, X86_EAX); - x86_shift_mem (p, X86_SHL, mem_addr); - - x86_mul_reg (p, X86_EAX, 0); - x86_mul_reg (p, X86_EAX, 1); - x86_mul_membase (p, X86_EBP, 8, 1); - - x86_imul_reg_reg (p, X86_EBX, X86_EDX); - x86_imul_reg_membase (p, X86_EBX, X86_EBP, 12); - - x86_imul_reg_reg_imm (p, X86_EBX, X86_EDX, 10); - x86_imul_reg_mem_imm (p, X86_EBX, mem_addr, 20); - x86_imul_reg_membase_imm (p, X86_EBX, X86_EBP, 16, 300); - - x86_div_reg (p, X86_EDX, 0); - x86_div_reg (p, X86_EDX, 1); - x86_div_mem (p, mem_addr, 1); - x86_div_membase (p, X86_ESI, 4, 1); - - x86_mov_mem_reg (p, mem_addr, X86_EAX, 4); - x86_mov_mem_reg (p, mem_addr, X86_EAX, 2); - x86_mov_mem_reg (p, mem_addr, X86_EAX, 1); - x86_mov_membase_reg (p, X86_EBP, 4, X86_EAX, 1); - - x86_mov_regp_reg (p, X86_EAX, X86_EAX, 4); - x86_mov_membase_reg (p, X86_EAX, 0, X86_EAX, 4); - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); - x86_mov_reg_memindex (p, X86_ECX, X86_EAX, 34, X86_EDX, 2, 4); - x86_mov_reg_memindex (p, X86_ECX, X86_NOBASEREG, 34, X86_EDX, 2, 4); - x86_mov_memindex_reg (p, X86_EAX, X86_EAX, 0, X86_EDX, 2, 4); - x86_mov_reg_reg (p, X86_EAX, X86_EAX, 1); - x86_mov_reg_reg (p, X86_EAX, X86_EAX, 4); - x86_mov_reg_mem (p, X86_EAX, mem_addr, 4); - - x86_mov_reg_imm (p, X86_EAX, 10); - x86_mov_mem_imm (p, mem_addr, 54, 4); - x86_mov_mem_imm (p, mem_addr, 54, 1); - - x86_lea_mem (p, X86_EDX, mem_addr); - /* test widen */ - x86_widen_memindex (p, X86_EDX, X86_ECX, 0, X86_EBX, 2, 1, 0); - - x86_cdq (p); - x86_wait (p); - - x86_fp_op_mem (p, X86_FADD, mem_addr, 1); - x86_fp_op_mem (p, X86_FSUB, mem_addr, 0); - x86_fp_op (p, X86_FSUB, 
2); - x86_fp_op_reg (p, X86_FMUL, 1, 0); - x86_fstp (p, 2); - x86_fcompp (p); - x86_fnstsw (p); - x86_fnstcw (p, mem_addr); - x86_fnstcw_membase (p, X86_ESP, -8); - - x86_fldcw_membase (p, X86_ESP, -8); - x86_fchs (p); - x86_frem (p); - x86_fxch (p, 3); - x86_fcomip (p, 3); - x86_fld_membase (p, X86_ESP, -8, 1); - x86_fld_membase (p, X86_ESP, -8, 0); - x86_fld80_membase (p, X86_ESP, -8); - x86_fild_membase (p, X86_ESP, -8, 1); - x86_fild_membase (p, X86_ESP, -8, 0); - x86_fld_reg (p, 4); - x86_fldz (p); - x86_fld1 (p); - - x86_fst (p, mem_addr, 1, 0); - x86_fst (p, mem_addr, 1, 1); - x86_fst (p, mem_addr, 0, 1); - - x86_fist_pop_membase (p, X86_EDX, 4, 1); - x86_fist_pop_membase (p, X86_EDX, 4, 0); - - x86_push_reg (p, X86_EBX); - x86_push_membase (p, X86_EBP, 8); - x86_push_imm (p, -1); - x86_pop_reg (p, X86_EBX); - - x86_pushad (p); - x86_pushfd (p); - x86_popfd (p); - x86_popad (p); - - target = p; - - start = p; - x86_jump32 (p, mem_addr); - x86_patch (start, target); - start = p; - x86_jump8 (p, 12); - x86_patch (start, target); - x86_jump_reg (p, X86_EAX); - x86_jump_membase (p, X86_EDX, 16); - - x86_jump_code (p, target); - - x86_branch8 (p, X86_CC_EQ, 54, 1); - x86_branch32 (p, X86_CC_LT, 54, 0); - x86_branch (p, X86_CC_GT, target, 0); - x86_branch_disp (p, X86_CC_NE, -4, 0); - - x86_set_reg (p, X86_CC_EQ, X86_EAX, 0); - x86_set_membase (p, X86_CC_LE, X86_EBP, -8, 0); - - x86_call_code (p, printf); - x86_call_reg (p, X86_ECX); - - x86_sahf (p); - - x86_fsin (p); - x86_fcos (p); - x86_fabs (p); - x86_fpatan (p); - x86_fprem (p); - x86_fprem1 (p); - x86_frndint (p); - x86_fsqrt (p); - x86_fptan (p); - - x86_leave (p); - x86_ret (p); - x86_ret_imm (p, 24); - - x86_cmov_reg (p, X86_CC_GT, 1, X86_EAX, X86_EDX); - x86_cmov_membase (p, X86_CC_GT, 0, X86_EAX, X86_EDX, -4); - - x86_nop (p); - x86_epilog (p, X86_CALLER_REGS); - - size = p-code; - for (i = 0; i < size; ++i) - printf (".byte %d\n", (unsigned int) code [i]); - return 0; -} diff --git a/x86/tramp.c b/x86/tramp.c deleted file mode 100644 index fab5a55..0000000 --- a/x86/tramp.c +++ /dev/null @@ -1,545 +0,0 @@ -/* - * Create trampolines to invoke arbitrary functions. - * - * Copyright (C) Ximian Inc. 
- *
- * Authors:
- * Paolo Molaro (lupus@ximian.com)
- * Dietmar Maurer (dietmar@ximian.com)
- *
- */
-
-#include "config.h"
-#include <stdlib.h>
-#include <string.h>
-#include "x86-codegen.h"
-#include "mono/metadata/class.h"
-#include "mono/metadata/tabledefs.h"
-#include "mono/interpreter/interp.h"
-#include "mono/metadata/appdomain.h"
-#include "mono/metadata/marshal.h"
-
-/*
- * The resulting function takes the form:
- * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
- */
-#define FUNC_ADDR_POS 8
-#define RETVAL_POS 12
-#define THIS_POS 16
-#define ARGP_POS 20
-#define LOC_POS -4
-
-#define ARG_SIZE sizeof (stackval)
-
-MonoPIFunc
-mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
-{
- unsigned char *p, *code_buffer;
- guint32 stack_size = 0, code_size = 50;
- guint32 arg_pos, simpletype;
- int i, stringp;
- static GHashTable *cache = NULL;
- MonoPIFunc res;
-
- if (!cache)
- cache = g_hash_table_new ((GHashFunc)mono_signature_hash,
- (GCompareFunc)mono_metadata_signature_equal);
-
- if ((res = (MonoPIFunc)g_hash_table_lookup (cache, sig)))
- return res;
-
- if (sig->hasthis) {
- stack_size += sizeof (gpointer);
- code_size += 10;
- }
-
- if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) {
- stack_size += sizeof (gpointer);
- code_size += 5;
- }
-
- for (i = 0; i < sig->param_count; ++i) {
- if (sig->params [i]->byref) {
- stack_size += sizeof (gpointer);
- code_size += 20;
- continue;
- }
- simpletype = sig->params [i]->type;
-enum_calc_size:
- switch (simpletype) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- stack_size += 4;
- code_size += i < 10 ? 5 : 8;
- break;
- case MONO_TYPE_VALUETYPE: {
- int size;
- if (sig->params [i]->data.klass->enumtype) {
- simpletype = sig->params [i]->data.klass->enum_basetype->type;
- goto enum_calc_size;
- }
- if ((size = mono_class_native_size (sig->params [i]->data.klass, NULL)) != 4) {
- stack_size += size + 3;
- stack_size &= ~3;
- code_size += 32;
- } else {
- stack_size += 4;
- code_size += i < 10 ? 5 : 8;
- }
- break;
- }
- case MONO_TYPE_I8:
- stack_size += 8;
- code_size += i < 10 ? 5 : 8;
- break;
- case MONO_TYPE_R4:
- stack_size += 4;
- code_size += i < 10 ? 10 : 13;
- break;
- case MONO_TYPE_R8:
- stack_size += 8;
- code_size += i < 10 ? 7 : 10;
- break;
- default:
- g_error ("Can't trampoline 0x%x", sig->params [i]->type);
- }
- }
- /*
- * FIXME: take into account large return values.
- */
-
- code_buffer = p = alloca (code_size);
-
- /*
- * Standard function prolog.
- */
- x86_push_reg (p, X86_EBP);
- x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4);
- /*
- * and align to 16 byte boundary...
- */
- stack_size += 15;
- stack_size &= ~15;
-
- if (stack_size)
- x86_alu_reg_imm (p, X86_SUB, X86_ESP, stack_size);
-
- /*
- * EDX has the pointer to the args.
- */
- x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4);
-
- /*
- * Push arguments in reverse order.
- */ - stringp = 0; - for (i = sig->param_count; i; --i) { - arg_pos = ARG_SIZE * (i - 1); - if (sig->params [i - 1]->byref) { - x86_push_membase (p, X86_EDX, arg_pos); - continue; - } - simpletype = sig->params [i - 1]->type; -enum_marshal: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - case MONO_TYPE_CHAR: - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_PTR: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - x86_push_membase (p, X86_EDX, arg_pos); - break; - case MONO_TYPE_R4: - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 4); - x86_fld_membase (p, X86_EDX, arg_pos, TRUE); - x86_fst_membase (p, X86_ESP, 0, FALSE, TRUE); - break; - case MONO_TYPE_CLASS: - x86_push_membase (p, X86_EDX, arg_pos); - break; - case MONO_TYPE_SZARRAY: - x86_push_membase (p, X86_EDX, arg_pos); - break; - case MONO_TYPE_VALUETYPE: - if (!sig->params [i - 1]->data.klass->enumtype) { - int size = mono_class_native_size (sig->params [i - 1]->data.klass, NULL); - if (size == 4) { - /* it's a structure that fits in 4 bytes, need to push the value pointed to */ - x86_mov_reg_membase (p, X86_EAX, X86_EDX, arg_pos, 4); - x86_push_regp (p, X86_EAX); - } else { - int ss = size; - ss += 3; - ss &= ~3; - - x86_alu_reg_imm (p, X86_SUB, X86_ESP, ss); - x86_push_imm (p, size); - x86_push_membase (p, X86_EDX, arg_pos); - x86_lea_membase (p, X86_EAX, X86_ESP, 2*4); - x86_push_reg (p, X86_EAX); - x86_mov_reg_imm (p, X86_EAX, memcpy); - x86_call_reg (p, X86_EAX); - x86_alu_reg_imm (p, X86_ADD, X86_ESP, 12); - /* memcpy might clobber EDX so restore it */ - x86_mov_reg_membase (p, X86_EDX, X86_EBP, ARGP_POS, 4); - } - } else { - /* it's an enum value */ - simpletype = sig->params [i - 1]->data.klass->enum_basetype->type; - goto enum_marshal; - } - break; - case MONO_TYPE_I8: - case MONO_TYPE_U8: - case MONO_TYPE_R8: - x86_push_membase (p, X86_EDX, arg_pos + 4); - x86_push_membase (p, X86_EDX, arg_pos); - break; - default: - g_error ("Can't trampoline 0x%x", sig->params [i - 1]->type); - } - } - - if (sig->hasthis) { - if (sig->call_convention != MONO_CALL_THISCALL) { - x86_mov_reg_membase (p, X86_EDX, X86_EBP, THIS_POS, 4); - x86_push_reg (p, X86_EDX); - } else { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, THIS_POS, 4); - } - } - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_push_membase (p, X86_ECX, 0); - } - } - - /* - * Insert call to function - */ - x86_mov_reg_membase (p, X86_EDX, X86_EBP, FUNC_ADDR_POS, 4); - x86_call_reg (p, X86_EDX); - - /* - * Handle retval. - * Small integer and pointer values are in EAX. - * Long integers are in EAX:EDX. - * FP values are on the FP stack. 
- */ - - if (sig->ret->byref || string_ctor) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - } else { - simpletype = sig->ret->type; - enum_retvalue: - switch (simpletype) { - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 1); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 2); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_CLASS: - case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - break; - case MONO_TYPE_STRING: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - break; - case MONO_TYPE_R4: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_fst_membase (p, X86_ECX, 0, FALSE, TRUE); - break; - case MONO_TYPE_R8: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_fst_membase (p, X86_ECX, 0, TRUE, TRUE); - break; - case MONO_TYPE_I8: - x86_mov_reg_membase (p, X86_ECX, X86_EBP, RETVAL_POS, 4); - x86_mov_regp_reg (p, X86_ECX, X86_EAX, 4); - x86_mov_membase_reg (p, X86_ECX, 4, X86_EDX, 4); - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - case MONO_TYPE_VOID: - break; - default: - g_error ("Can't handle as return value 0x%x", sig->ret->type); - } - } - - /* - * Standard epilog. - */ - x86_leave (p); - x86_ret (p); - - g_assert (p - code_buffer < code_size); - res = (MonoPIFunc)g_memdup (code_buffer, p - code_buffer); - - g_hash_table_insert (cache, sig, res); - - return res; -} - -#define MINV_POS (- sizeof (MonoInvocation)) -#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count) -#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type)) - -/* - * Returns a pointer to a native function that can be used to - * call the specified method. - * The function created will receive the arguments according - * to the call convention specified in the method. - * This function works by creating a MonoInvocation structure, - * filling the fields in and calling ves_exec_method on it. - * Still need to figure out how to handle the exception stuff - * across the managed/unmanaged boundary. - */ -void * -mono_arch_create_method_pointer (MonoMethod *method) -{ - MonoMethodSignature *sig; - MonoJitInfo *ji; - unsigned char *p, *code_buffer; - gint32 local_size; - gint32 stackval_pos, arg_pos = 8; - int i, size, align, cpos; - int *vtbuf; - - sig = method->signature; - - code_buffer = p = alloca (512); /* FIXME: check for overflows... 
*/ - vtbuf = alloca (sizeof(int)*sig->param_count); - - local_size = sizeof (MonoInvocation) + sizeof (stackval) * (sig->param_count + 1); - - local_size += 7; - local_size &= ~7; - - stackval_pos = -local_size; - - cpos = 0; - for (i = 0; i < sig->param_count; i++) { - MonoType *type = sig->params [i]; - vtbuf [i] = -1; - if (type->type == MONO_TYPE_VALUETYPE) { - MonoClass *klass = type->data.klass; - if (klass->enumtype) - continue; - size = mono_class_native_size (klass, &align); - cpos += align - 1; - cpos &= ~(align - 1); - vtbuf [i] = cpos; - cpos += size; - } - } - - cpos += 7; - cpos &= ~7; - - local_size += cpos; - - /* - * Standard function prolog. - */ - x86_push_reg (p, X86_EBP); - x86_mov_reg_reg (p, X86_EBP, X86_ESP, 4); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, local_size); - - /* - * Initialize MonoInvocation fields, first the ones known now. - */ - x86_mov_reg_imm (p, X86_EAX, 0); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)), X86_EAX, 4); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)), X86_EAX, 4); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)), X86_EAX, 4); - /* - * Set the method pointer. - */ - x86_mov_membase_imm (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)), (int)method, 4); - - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref && !sig->ret->data.klass->enumtype) - arg_pos += 4; - - /* - * Handle this. - */ - if (sig->hasthis) { - if (sig->call_convention != MONO_CALL_THISCALL) { - /* - * Grab it from the stack, otherwise it's already in ECX. - */ - x86_mov_reg_membase (p, X86_ECX, X86_EBP, arg_pos, 4); - arg_pos += 4; - } - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)), X86_ECX, 4); - } - /* - * Handle the arguments. stackval_pos is the posset of the stackval array from EBP. - * arg_pos is the offset from EBP to the incoming arg on the stack. - * We just call stackval_from_data to handle all the (nasty) issues.... - */ - x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)), X86_EAX, 4); - for (i = 0; i < sig->param_count; ++i) { - if (vtbuf [i] >= 0) { - x86_lea_membase (p, X86_EAX, X86_EBP, - local_size + vtbuf [i]); - x86_mov_membase_reg (p, X86_EBP, stackval_pos, X86_EAX, 4); - } - x86_mov_reg_imm (p, X86_ECX, stackval_from_data); - x86_lea_membase (p, X86_EDX, X86_EBP, arg_pos); - x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); - x86_push_imm (p, sig->pinvoke); - x86_push_reg (p, X86_EDX); - x86_push_reg (p, X86_EAX); - x86_push_imm (p, sig->params [i]); - x86_call_reg (p, X86_ECX); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); - stackval_pos += sizeof (stackval); - /* fixme: alignment */ - if (sig->pinvoke) - arg_pos += mono_type_native_stack_size (sig->params [i], &align); - else - arg_pos += mono_type_stack_size (sig->params [i], &align); - } - - /* - * Handle the return value storage area. - */ - x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); - x86_mov_membase_reg (p, X86_EBP, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)), X86_EAX, 4); - if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) { - MonoClass *klass = sig->ret->data.klass; - if (!klass->enumtype) { - x86_mov_reg_membase (p, X86_ECX, X86_EBP, 8, 4); - x86_mov_membase_reg (p, X86_EBP, stackval_pos, X86_ECX, 4); - } - } - - /* - * Call the method. 
- */ - x86_lea_membase (p, X86_EAX, X86_EBP, MINV_POS); - x86_push_reg (p, X86_EAX); - x86_mov_reg_imm (p, X86_EDX, ves_exec_method); - x86_call_reg (p, X86_EDX); - - /* - * Move the return value to the proper place. - */ - x86_lea_membase (p, X86_EAX, X86_EBP, stackval_pos); - if (sig->ret->byref) { - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); - } else { - int simpletype = sig->ret->type; - enum_retvalue: - switch (sig->ret->type) { - case MONO_TYPE_VOID: - break; - case MONO_TYPE_BOOLEAN: - case MONO_TYPE_I1: - case MONO_TYPE_U1: - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 1); - break; - case MONO_TYPE_CHAR: - case MONO_TYPE_I2: - case MONO_TYPE_U2: - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 2); - break; - case MONO_TYPE_I4: - case MONO_TYPE_U4: - case MONO_TYPE_I: - case MONO_TYPE_U: - case MONO_TYPE_OBJECT: - case MONO_TYPE_STRING: - case MONO_TYPE_CLASS: - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); - break; - case MONO_TYPE_I8: - x86_mov_reg_membase (p, X86_EDX, X86_EAX, 4, 4); - x86_mov_reg_membase (p, X86_EAX, X86_EAX, 0, 4); - break; - case MONO_TYPE_R8: - x86_fld_membase (p, X86_EAX, 0, TRUE); - break; - case MONO_TYPE_VALUETYPE: - if (sig->ret->data.klass->enumtype) { - simpletype = sig->ret->data.klass->enum_basetype->type; - goto enum_retvalue; - } - - x86_push_imm (p, sig->pinvoke); - x86_push_membase (p, X86_EBP, stackval_pos); - x86_push_reg (p, X86_EAX); - x86_push_imm (p, sig->ret); - x86_mov_reg_imm (p, X86_ECX, stackval_to_data); - x86_call_reg (p, X86_ECX); - x86_alu_reg_imm (p, X86_SUB, X86_ESP, 16); - - break; - default: - g_error ("Type 0x%x not handled yet in thunk creation", sig->ret->type); - break; - } - } - - /* - * Standard epilog. - */ - x86_leave (p); - x86_ret (p); - - g_assert (p - code_buffer < 512); - - ji = g_new0 (MonoJitInfo, 1); - ji->method = method; - ji->code_size = p - code_buffer; - ji->code_start = g_memdup (code_buffer, p - code_buffer); - - mono_jit_info_table_add (mono_get_root_domain (), ji); - - return ji->code_start; -} -- cgit v1.1 From e8fa461503cf681fd7f6fffdbe94346cb4a0b94f Mon Sep 17 00:00:00 2001 From: Zoltan Varga Date: Sat, 13 Sep 2014 13:56:18 -0400 Subject: [runtime] Remove an unused interpreter file. --- unknown.c | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 unknown.c diff --git a/unknown.c b/unknown.c deleted file mode 100644 index d865299..0000000 --- a/unknown.c +++ /dev/null @@ -1,18 +0,0 @@ -#include "mono/interpreter/interp.h" -#ifdef NO_PORT -MonoPIFunc -mono_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor) -{ - g_error ("Unsupported arch"); - return NULL; -} - -void * -mono_create_method_pointer (MonoMethod *method) -{ - g_error ("Unsupported arch"); - return NULL; -} - -#endif - -- cgit v1.1 From cf4fa78a8d107bf09aa4de709ace549bcb5bac1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Carretero?= Date: Tue, 30 Sep 2014 20:53:16 -0400 Subject: real_nd: improve transpose size by unrolling of block of h=8, w=1 That works well on ARM and should also work better elsewhere. 
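In outline, the unrolled transpose reads eight consecutive rows of a single column per step, so the eight loads walk down one input column while the eight stores land in consecutive output words. The sketch below is illustrative only: the name scalar_transpose_8x1 is invented here, the element type is uint64_t because one complex float pair occupies eight bytes, and the committed ffts_scalar_transpose (whose signature appears in the diff below) additionally carries a scratch-buffer parameter and may differ in detail.

#include <stdint.h>

/* Hypothetical sketch, not the committed implementation: transpose a
 * w x h matrix of 8-byte elements, unrolled over blocks of h=8 rows
 * by w=1 column as the commit message describes. */
static void scalar_transpose_8x1(const uint64_t *in, uint64_t *out, int w, int h)
{
	int i, j;

	for (j = 0; j + 8 <= h; j += 8) {
		for (i = 0; i < w; i++) {
			/* eight strided loads down one column,
			 * eight contiguous stores into the output row */
			out[i*h + j + 0] = in[(j + 0)*w + i];
			out[i*h + j + 1] = in[(j + 1)*w + i];
			out[i*h + j + 2] = in[(j + 2)*w + i];
			out[i*h + j + 3] = in[(j + 3)*w + i];
			out[i*h + j + 4] = in[(j + 4)*w + i];
			out[i*h + j + 5] = in[(j + 5)*w + i];
			out[i*h + j + 6] = in[(j + 6)*w + i];
			out[i*h + j + 7] = in[(j + 7)*w + i];
		}
	}

	for (; j < h; j++) {	/* rows left over when h % 8 != 0 */
		for (i = 0; i < w; i++)
			out[i*h + j] = in[j*w + i];
	}
}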
--- src/ffts_real_nd.c | 47 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index fe9ef69..01d0fec 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -60,15 +60,48 @@ void ffts_free_nd_real(ffts_plan_t *p) { free(p); } -void ffts_scalar_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) { - - size_t i,j; - for(i=0;i Date: Tue, 30 Sep 2014 20:54:46 -0400 Subject: real: fix alignment issue in 1d execution (bug #30) Because of the size of M/2+1, you can't expect the data to be aligned at 128 bits. --- src/ffts_real.c | 160 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 80 insertions(+), 80 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index 7fad638..87eaab7 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -61,45 +61,46 @@ void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) { size_t i; #ifdef __ARM_NEON__ for(i=0;i Date: Tue, 30 Sep 2014 21:01:14 -0400 Subject: automatic trailing space removal ... because I use a real editor --- src/ffts.h | 46 +++++++++++++++++++++++----------------------- src/ffts_real.c | 30 +++++++++++++++--------------- src/ffts_real_nd.c | 50 +++++++++++++++++++++++++------------------------- 3 files changed, 63 insertions(+), 63 deletions(-) diff --git a/src/ffts.h b/src/ffts.h index f08a6bb..cd9e24b 100644 --- a/src/ffts.h +++ b/src/ffts.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -58,11 +58,11 @@ static const __attribute__ ((aligned(64))) float w_data[16] = { 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, -0.70710678118654757273731092936941, -0.70710678118654746171500846685376, - 1.0f, 0.70710678118654757273731092936941f, + 1.0f, 0.70710678118654757273731092936941f, -0.0f, -0.70710678118654746171500846685376, 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, - 1.0f, 0.70710678118654757273731092936941f, + 1.0f, 0.70710678118654757273731092936941f, 0.0f, 0.70710678118654746171500846685376 }; @@ -87,7 +87,7 @@ typedef struct _ffts_plan_t ffts_plan_t; struct _ffts_plan_t { /** - * + * */ ptrdiff_t *offsets; #ifdef DYNAMIC_DISABLED @@ -96,26 +96,26 @@ struct _ffts_plan_t { */ void *ws; /** - * ee - 2 size x size8 + * ee - 2 size x size8 * oo - 2 x size4 in parallel - * oe - + * oe - */ void *oe_ws, *eo_ws, *ee_ws; #else void __attribute__((aligned(32))) *ws; void __attribute__((aligned(32))) *oe_ws, *eo_ws, *ee_ws; #endif - /** + /** * Pointer into an array of precomputed indexes for the input data array */ - ptrdiff_t *is; + ptrdiff_t *is; /** * Twiddle Factor Indexes */ size_t *ws_is; - - /** + + /** * Size of the loops for the base cases */ size_t i0, i1, n_luts; @@ -128,33 +128,33 @@ struct _ffts_plan_t { /** * Used in multidimensional Code ?? 
*/ - transform_index_t *transforms; + transform_index_t *transforms; //transform_func_t transform; - - /** - * Pointer to the dynamically generated function + + /** + * Pointer to the dynamically generated function * that will execute the FFT */ void (*transform)(ffts_plan_t * , const void * , void * ); /** - * Pointer to the base memory address of + * Pointer to the base memory address of * of the transform function */ void *transform_base; /** - * Size of the memory block contain the + * Size of the memory block contain the * generated code */ size_t transform_size; /** * Points to the cosnant variables used by - * the Assembly Code + * the Assembly Code */ void *constants; - + // multi-dimensional stuff: struct _ffts_plan_t **plans; int rank; @@ -174,13 +174,13 @@ struct _ffts_plan_t { * Coefficiants for the real valued transforms */ float *A, *B; - + size_t i2; }; void ffts_free(ffts_plan_t *); -ffts_plan_t *ffts_init_1d(size_t N, int sign); +ffts_plan_t *ffts_init_1d(size_t N, int sign); void ffts_execute(ffts_plan_t *, const void *, void *); #endif // vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: diff --git a/src/ffts_real.c b/src/ffts_real.c index 87eaab7..97ff942 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -111,10 +111,10 @@ void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) { #endif } - + out[N] = buf[0] - buf[1]; out[N+1] = 0.0f; - + } void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) { @@ -124,12 +124,12 @@ void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) { float *A = p->A; float *B = p->B; size_t N = p->N; - + float *p_buf0 = in; float *p_buf1 = in + N - 2; - + float *p_out = buf; - + size_t i; #ifdef __ARM_NEON__ for(i=0;iplans[0]->transform(p->plans[0], buf, out); - + } ffts_plan_t *ffts_init_1d_real(size_t N, int sign) { @@ -189,13 +189,13 @@ ffts_plan_t *ffts_init_1d_real(size_t N, int sign) { if(sign < 0) p->transform = &ffts_execute_1d_real; else p->transform = &ffts_execute_1d_real_inv; - + p->destroy = &ffts_free_1d_real; p->N = N; p->rank = 1; p->plans = malloc(sizeof(ffts_plan_t **) * 1); - p->plans[0] = ffts_init_1d(N/2, sign); + p->plans[0] = ffts_init_1d(N/2, sign); p->buf = valloc(sizeof(float) * 2 * ((N/2) + 1)); @@ -219,7 +219,7 @@ ffts_plan_t *ffts_init_1d_real(size_t N, int sign) { p->B[2 * i + 1] = 1.0 * (1.0 * cos (2.0f * PI / (double) (N) * (double) i)); } } - + return p; } diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 01d0fec..151b72a 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -41,14 +41,14 @@ void ffts_free_nd_real(ffts_plan_t *p) { int i; for(i=0;i<p->rank;i++) { - + ffts_plan_t *x = p->plans[i]; int k; for(k=i+1;k<p->rank;k++) { if(x == p->plans[k]) p->plans[k] = NULL; } - + if(x) ffts_free(x); } @@ -112,15 +112,15 @@ void ffts_execute_nd_real(ffts_plan_t *p, const void * in, void * out) { size_t i,j; for(i=0;i<p->Ns[0];i++) { - p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * (p->Ms[0] / 2 + 1))); + p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * (p->Ms[0] / 2 + 1))); } - ffts_scalar_transpose(buf, dout, p->Ms[0] / 2 + 1, p->Ns[0], p->transpose_buf); + ffts_scalar_transpose(buf, dout, p->Ms[0] / 2 + 1, p->Ns[0], p->transpose_buf); for(i=1;i<p->rank;i++) { - for(j=0;j<p->Ns[i];j++) { - p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); + for(j=0;j<p->Ns[i];j++) { + p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); } - ffts_scalar_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); + ffts_scalar_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); } } @@ -131,7 +131,7 @@ void ffts_execute_nd_real_inv(ffts_plan_t *p, const void * in, void * out) { uint64_t *buf2; uint64_t *dout = (uint64_t *)out; size_t vol = 1; - + float *bufr = (float *)(p->buf); float *doutr = (float *)out; @@ -143,15 +143,15 @@ buf2 = buf + vol; - ffts_scalar_transpose(din, buf, p->Ms[0], p->Ns[0], p->transpose_buf); + ffts_scalar_transpose(din, buf, p->Ms[0], p->Ns[0], p->transpose_buf); for(i=0;i<p->Ms[0];i++) { - p->plans[0]->transform(p->plans[0], buf + (i * p->Ns[0]), buf2 + (i * p->Ns[0])); + p->plans[0]->transform(p->plans[0], buf + (i * p->Ns[0]), buf2 + (i * p->Ns[0])); } - - ffts_scalar_transpose(buf2, buf, p->Ns[0], p->Ms[0], p->transpose_buf); - for(j=0;j<p->Ms[1];j++) { - p->plans[1]->transform(p->plans[1], buf + (j * (p->Ms[0])), &doutr[j * p->Ns[1]]); + + ffts_scalar_transpose(buf2, buf, p->Ns[0], p->Ms[0], p->transpose_buf); + for(j=0;j<p->Ms[1];j++) { + p->plans[1]->transform(p->plans[1], buf + (j * (p->Ms[0])), &doutr[j * p->Ns[1]]); } } @@ -173,9 +173,9 @@ ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign) { int i; for(i=0;i<rank;i++) { p->Ns[i] = Ns[i]; - vol *= Ns[i]; + vol *= Ns[i]; } - + //There is probably a prettier way of doing this, but it works..
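/* Matching 1d sub-plans are shared across dimensions below (the p->Ms[k] == p->Ms[i] and p->Ns[k] == p->Ns[i] tests); this is why ffts_free_nd_real above NULLs the duplicate entries before freeing, so each shared plan is released exactly once. */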
if(sign < 0) { bufsize = 2 * vol; @@ -188,7 +188,7 @@ for(i=0;i<rank;i++) { p->Ms[i] = vol / p->Ns[i]; - + p->plans[i] = NULL; int k; @@ -196,14 +196,14 @@ for(k=1;k<i;k++) { if(p->Ms[k] == p->Ms[i]) p->plans[i] = p->plans[k]; } - if(!i) p->plans[i] = ffts_init_1d_real(p->Ms[i], sign); - else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign); + if(!i) p->plans[i] = ffts_init_1d_real(p->Ms[i], sign); + else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign); }else{ for(k=0;k<i;k++) { if(p->Ns[k] == p->Ns[i]) p->plans[i] = p->plans[k]; } - if(i==rank-1) p->plans[i] = ffts_init_1d_real(p->Ns[i], sign); - else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ns[i], sign); + if(i==rank-1) p->plans[i] = ffts_init_1d_real(p->Ns[i], sign); + else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ns[i], sign); } } if(sign < 0) { -- cgit v1.1 From e6c375a1b098afa907bb25e53adb1e203fe47370 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Carretero?= Date: Tue, 14 Oct 2014 17:54:14 -0400 Subject: do not mprotect if dynamic code generation is disabled --- src/ffts.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ffts.c b/src/ffts.c index 7c46bfc..b413c2b 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -85,6 +85,7 @@ void ffts_free_1d(ffts_plan_t *p) { //free(p->transforms); if(p->transforms) free(p->transforms); +#if !defined(DYNAMIC_DISABLED) if(p->transform_base) { if (mprotect(p->transform_base, p->transform_size, PROT_READ | PROT_WRITE)) { perror("Couldn't mprotect"); @@ -93,6 +94,7 @@ void ffts_free_1d(ffts_plan_t *p) { munmap(p->transform_base, p->transform_size); //free(p->transform_base); } +#endif free(p); } -- cgit v1.1 From 5904d949924cd327dcc21a85464672efd2dc052f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 15:15:13 +0200 Subject: YASM requires ".code 64" in assembly --- src/sse.s | 754 +++++++++++++++++++++++++++++++------------------------------- 1 file changed, 376 insertions(+), 378 deletions(-) diff --git a/src/sse.s b/src/sse.s index 79dd6ec..90f02db 100644 --- a/src/sse.s +++ b/src/sse.s @@ -9,14 +9,14 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -31,30 +31,32 @@ */ + .code64 - .globl _neon_x4 - .align 4 + .globl _neon_x4 + .align 4 _neon_x4: - .globl _neon_x8 - .align 4 + .globl _neon_x8 + .align 4 _neon_x8: - .globl _neon_x8_t - .align 4 + .globl _neon_x8_t + .align 4 _neon_x8_t: #ifdef __APPLE__ - .globl _leaf_ee_init + .globl _leaf_ee_init _leaf_ee_init: #else - .globl leaf_ee_init + .globl leaf_ee_init leaf_ee_init: #endif - #lea L_sse_constants(%rip), %r9 - movq 0xe0(%rdi), %r9 - xorl %eax, %eax + #lea L_sse_constants(%rip), %r9 + movq 0xe0(%rdi), %r9 + xorl %eax, %eax + # eax is loop counter (init to 0) # rcx is loop max count # rsi is 'in' base pointer @@ -62,48 +64,48 @@ leaf_ee_init: # r8 is offsets pointer # r9 is constants pointer # scratch: rax r11 r12 -# .align 4, 0x90 +# .align 4, 0x90 # _leaf_ee + 9 needs 16 byte alignment #ifdef __APPLE__ - .globl _leaf_ee + .globl _leaf_ee _leaf_ee: #else - .globl leaf_ee + .globl leaf_ee leaf_ee: #endif - movaps 32(%r9), %xmm0 #83.5 - movaps (%r9), %xmm8 #83.5 + movaps 32(%r9), %xmm0 #83.5 + movaps (%r9), %xmm8 #83.5 LEAF_EE_1: LEAF_EE_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm7 #83.5 LEAF_EE_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm12 #83.5 movaps %xmm7, %xmm6 #83.5 LEAF_EE_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 movaps %xmm12, %xmm11 #83.5 subps %xmm10, %xmm12 #83.5 addps %xmm10, %xmm11 #83.5 xorps %xmm8, %xmm12 #83.5 LEAF_EE_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm9 #83.5 LEAF_EE_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 addps %xmm9, %xmm6 #83.5 subps %xmm9, %xmm7 #83.5 LEAF_EE_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm13 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm13 #83.5 movaps %xmm10, %xmm9 #83.5 LEAF_EE_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm3 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm3 #83.5 movaps %xmm6, %xmm5 #83.5 LEAF_EE_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm14 #83.5 + movaps 0xFECA(%rsi,%rax,4), %xmm14 #83.5 movaps %xmm3, %xmm15 #83.5 shufps $177, %xmm12, %xmm12 #83.5 movaps %xmm7, %xmm4 #83.5 - movslq (%r8, %rax, 4), %r11 #83.44 + movslq (%r8, %rax, 4), %r11 #83.44 subps %xmm13, %xmm10 #83.5 subps %xmm14, %xmm3 #83.5 addps %xmm11, %xmm5 #83.5 @@ -112,9 +114,9 @@ LEAF_EE_const_7: addps %xmm12, %xmm7 #83.5 addps %xmm13, %xmm9 #83.5 addps %xmm14, %xmm15 #83.5 - movaps 16(%r9), %xmm12 #83.5 + movaps 16(%r9), %xmm12 #83.5 movaps %xmm9, %xmm1 #83.5 - movaps 16(%r9), %xmm11 #83.5 + movaps 16(%r9), %xmm11 #83.5 movaps %xmm5, %xmm2 #83.5 mulps %xmm10, %xmm12 #83.5 subps %xmm15, %xmm9 #83.5 @@ -144,12 +146,12 @@ LEAF_EE_const_7: movaps %xmm2, %xmm3 #83.5 shufps $177, %xmm12, %xmm12 #83.5 movaps %xmm6, %xmm9 #83.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 + movslq 8(%r8, %rax, 4), %r12 #83.59 movlhps %xmm4, %xmm3 #83.5 - addq $4, %rax + addq $4, %rax shufps $238, %xmm4, %xmm2 #83.5 movaps %xmm1, %xmm4 #83.5 - #movntdq %xmm3, (%rdx,%r11,4) #83.5 + #movntdq %xmm3, (%rdx,%r11,4) #83.5 subps %xmm12, %xmm7 #83.5 addps %xmm12, %xmm14 #83.5 movlhps %xmm7, %xmm4 #83.5 @@ -167,46 +169,44 @@ LEAF_EE_const_7: movaps %xmm1, 16(%rdx,%r12,4) #83.5 movaps %xmm5, 32(%rdx,%r12,4) #83.5 movaps %xmm6, 48(%rdx,%r12,4) #83.5 - cmpq %rcx, %rax - jne LEAF_EE_1 - - + cmpq %rcx, %rax + jne LEAF_EE_1 # 
_leaf_oo + 4 needs to be 16 byte aligned #ifdef __APPLE__ - .globl _leaf_oo + .globl _leaf_oo _leaf_oo: #else - .globl leaf_oo + .globl leaf_oo leaf_oo: #endif - movaps (%r9), %xmm5 #92.7 + movaps (%r9), %xmm5 #92.7 LEAF_OO_1: LEAF_OO_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm4 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm4 #93.5 movaps %xmm4, %xmm6 #93.5 LEAF_OO_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm7 #93.5 LEAF_OO_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm10 #93.5 addps %xmm7, %xmm6 #93.5 subps %xmm7, %xmm4 #93.5 LEAF_OO_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm8 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm8 #93.5 movaps %xmm10, %xmm9 #93.5 LEAF_OO_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm1 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm1 #93.5 movaps %xmm6, %xmm3 #93.5 LEAF_OO_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm11 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm11 #93.5 movaps %xmm1, %xmm2 #93.5 LEAF_OO_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm14 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm14 #93.5 movaps %xmm4, %xmm15 #93.5 LEAF_OO_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #93.5 + movaps 0xFECA(%rsi,%rax,4), %xmm12 #93.5 movaps %xmm14, %xmm13 #93.5 - movslq (%r8, %rax, 4), %r11 #83.44 + movslq (%r8, %rax, 4), %r11 #83.44 subps %xmm8, %xmm10 #93.5 addps %xmm8, %xmm9 #93.5 addps %xmm11, %xmm2 #93.5 @@ -221,8 +221,8 @@ LEAF_OO_const_7: movaps %xmm2, %xmm9 #93.5 shufps $177, %xmm14, %xmm14 #93.5 movaps %xmm6, %xmm7 #93.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 - addq $4, %rax #92.18 + movslq 8(%r8, %rax, 4), %r12 #83.59 + addq $4, %rax #92.18 addps %xmm10, %xmm4 #93.5 addps %xmm13, %xmm9 #93.5 subps %xmm13, %xmm2 #93.5 @@ -249,31 +249,31 @@ LEAF_OO_const_7: movaps %xmm6, 16(%rdx,%r12,4) #93.5 movaps %xmm9, 32(%rdx,%r12,4) #93.5 movaps %xmm2, 48(%rdx,%r12,4) #93.5 - cmpq %rcx, %rax - jne LEAF_OO_1 # Prob 95% #92.14 + cmpq %rcx, %rax + jne LEAF_OO_1 # Prob 95% #92.14 #ifdef __APPLE__ - .globl _leaf_eo + .globl _leaf_eo _leaf_eo: #else - .globl leaf_eo + .globl leaf_eo leaf_eo: #endif LEAF_EO_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm9 #88.5 LEAF_EO_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm7 #88.5 movaps %xmm9, %xmm11 #88.5 LEAF_EO_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm5 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm5 #88.5 movaps %xmm7, %xmm6 #88.5 LEAF_EO_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5 subps %xmm5, %xmm7 #88.5 addps %xmm4, %xmm11 #88.5 subps %xmm4, %xmm9 #88.5 addps %xmm5, %xmm6 #88.5 - movaps (%r9), %xmm3 #88.5 + movaps (%r9), %xmm3 #88.5 movaps %xmm11, %xmm10 #88.5 xorps %xmm3, %xmm7 #88.5 movaps %xmm9, %xmm8 #88.5 @@ -282,25 +282,25 @@ LEAF_EO_const_1: subps %xmm6, %xmm11 #88.5 subps %xmm7, %xmm8 #88.5 addps %xmm7, %xmm9 #88.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 + movslq 8(%r8, %rax, 4), %r12 #83.59 movaps %xmm10, %xmm2 #88.5 - movslq (%r8, %rax, 4), %r11 #83.44 + movslq (%r8, %rax, 4), %r11 #83.44 movaps %xmm11, %xmm1 #88.5 shufps $238, %xmm8, %xmm10 #88.5 shufps $238, %xmm9, %xmm11 #88.5 movaps %xmm10, (%rdx,%r12,4) #88.5 movaps %xmm11, 16(%rdx,%r12,4) #88.5 LEAF_EO_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm15 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm15 #88.5 LEAF_EO_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm12 #88.5 movaps %xmm15, %xmm14 #88.5 LEAF_EO_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5 + movaps 
0xFECA(%rsi,%rax,4), %xmm4 #88.5 addps %xmm12, %xmm14 #88.5 subps %xmm12, %xmm15 #88.5 LEAF_EO_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm13 #88.5 + movaps 0xFECA(%rsi,%rax,4), %xmm13 #88.5 movaps %xmm4, %xmm5 #88.5 movaps %xmm14, %xmm7 #88.5 addps %xmm13, %xmm5 #88.5 @@ -317,13 +317,13 @@ LEAF_EO_const_7: movlhps %xmm4, %xmm8 #88.5 movaps %xmm1, %xmm12 #88.5 shufps $177, %xmm15, %xmm15 #88.5 - movaps 0x30(%r9), %xmm11 #88.5 - addq $4, %rax #90.5 + movaps 0x30(%r9), %xmm11 #88.5 + addq $4, %rax #90.5 subps %xmm15, %xmm14 #88.5 mulps %xmm7, %xmm11 #88.5 addps %xmm15, %xmm4 #88.5 - movaps 0x30(%r9), %xmm9 #88.5 - movaps 0x40(%r9), %xmm15 #88.5 + movaps 0x30(%r9), %xmm9 #88.5 + movaps 0x40(%r9), %xmm15 #88.5 shufps $177, %xmm7, %xmm7 #88.5 mulps %xmm8, %xmm9 #88.5 mulps %xmm15, %xmm7 #88.5 @@ -349,31 +349,30 @@ LEAF_EO_const_7: movaps %xmm1, 16(%rdx,%r11,4) #88.5 movaps %xmm3, 32(%rdx,%r11,4) #88.5 movaps %xmm12, 48(%rdx,%r11,4) #88.5 - #ifdef __APPLE__ - .globl _leaf_oe + .globl _leaf_oe _leaf_oe: #else - .globl leaf_oe + .globl leaf_oe leaf_oe: #endif - movaps (%r9), %xmm0 #59.5 - #movaps 0x20(%r9), %xmm1 #59.5 + movaps (%r9), %xmm0 #59.5 + #movaps 0x20(%r9), %xmm1 #59.5 LEAF_OE_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm6 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm6 #70.5 LEAF_OE_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm8 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm8 #70.5 movaps %xmm6, %xmm10 #70.5 shufps $228, %xmm8, %xmm10 #70.5 movaps %xmm10, %xmm9 #70.5 shufps $228, %xmm6, %xmm8 #70.5 LEAF_OE_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm12 #70.5 LEAF_OE_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 movaps %xmm12, %xmm14 #70.5 - movslq (%r8, %rax, 4), %r11 #83.44 + movslq (%r8, %rax, 4), %r11 #83.44 addps %xmm8, %xmm9 #70.5 subps %xmm8, %xmm10 #70.5 addps %xmm7, %xmm14 #70.5 @@ -390,32 +389,32 @@ LEAF_OE_const_1: subps %xmm9, %xmm14 #70.5 shufps $238, %xmm12, %xmm5 #70.5 addps %xmm10, %xmm12 #70.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 + movslq 8(%r8, %rax, 4), %r12 #83.59 movlhps %xmm11, %xmm13 #70.5 movaps %xmm13, (%rdx,%r11,4) #70.5 - movaps 0x30(%r9), %xmm13 #70.5 + movaps 0x30(%r9), %xmm13 #70.5 movlhps %xmm12, %xmm14 #70.5 - movaps 0x40(%r9), %xmm12 #70.5 + movaps 0x40(%r9), %xmm12 #70.5 mulps %xmm5, %xmm13 #70.5 shufps $177, %xmm5, %xmm5 #70.5 mulps %xmm12, %xmm5 #70.5 movaps %xmm14, 16(%rdx,%r11,4) #70.5 subps %xmm5, %xmm13 #70.5 - movaps 0x30(%r9), %xmm5 #70.5 + movaps 0x30(%r9), %xmm5 #70.5 mulps %xmm4, %xmm5 #70.5 shufps $177, %xmm4, %xmm4 #70.5 mulps %xmm12, %xmm4 #70.5 LEAF_OE_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm9 #70.5 addps %xmm4, %xmm5 #70.5 LEAF_OE_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 movaps %xmm9, %xmm3 #70.5 LEAF_OE_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm2 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm2 #70.5 movaps %xmm7, %xmm6 #70.5 LEAF_OE_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm15 #70.5 + movaps 0xFECA(%rsi,%rax,4), %xmm15 #70.5 movaps %xmm13, %xmm4 #70.5 subps %xmm2, %xmm7 #70.5 addps %xmm15, %xmm3 #70.5 @@ -424,7 +423,7 @@ LEAF_OE_const_5: subps %xmm5, %xmm13 #70.5 addps %xmm5, %xmm4 #70.5 xorps %xmm0, %xmm7 #70.5 - addq $4, %rax #72.5 + addq $4, %rax #72.5 movaps %xmm3, %xmm2 #70.5 shufps $177, %xmm7, %xmm7 #70.5 movaps %xmm9, %xmm8 #70.5 @@ -452,37 +451,36 @@ LEAF_OE_const_5: movaps %xmm3, 16(%rdx,%r12,4) #70.5 movaps %xmm14, 32(%rdx,%r12,4) #70.5 movaps %xmm4, 
48(%rdx,%r12,4) #70.5 - - + #ifdef __APPLE__ - .globl _leaf_end + .globl _leaf_end _leaf_end: #else - .globl leaf_end + .globl leaf_end leaf_end: #endif #ifdef __APPLE__ - .globl _x_init + .globl _x_init _x_init: #else - .globl x_init + .globl x_init x_init: #endif - #movaps L_sse_constants(%rip), %xmm3 #34.3 - movaps (%r9), %xmm3 #34.3 - movq 0x20(%rdi),%r8 + #movaps L_sse_constants(%rip), %xmm3 #34.3 + movaps (%r9), %xmm3 #34.3 + movq 0x20(%rdi), %r8 #ifdef __APPLE__ - .globl _x4 + .globl _x4 _x4: #else - .globl x4 + .globl x4 x4: #endif movaps 64(%rdx), %xmm0 #34.3 movaps 96(%rdx), %xmm1 #34.3 movaps (%rdx), %xmm7 #34.3 - movaps (%r8), %xmm4 #const + movaps (%r8), %xmm4 #const movaps %xmm7, %xmm9 #34.3 movaps %xmm4, %xmm6 #34.3 movaps 16(%r8), %xmm2 #const @@ -510,10 +508,10 @@ x4: movaps %xmm8, 32(%rdx) #34.3 movaps %xmm9, 64(%rdx) #34.3 movaps %xmm10, 96(%rdx) #34.3 - movaps 32(%r8), %xmm14 #const #34.3 + movaps 32(%r8), %xmm14 #const #34.3 movaps 80(%rdx), %xmm11 #34.3 movaps %xmm14, %xmm0 #34.3 - movaps 48(%r8), %xmm13 #const #34.3 + movaps 48(%r8), %xmm13 #const #34.3 mulps %xmm11, %xmm0 #34.3 mulps %xmm12, %xmm14 #34.3 shufps $177, %xmm11, %xmm11 #34.3 @@ -539,340 +537,340 @@ x4: movaps %xmm2, 48(%rdx) #34.3 movaps %xmm4, 80(%rdx) #34.3 movaps %xmm5, 112(%rdx) #34.3 - ret - + ret + # _x8_soft + 5 needs to be 16 byte aligned #ifdef __APPLE__ - .globl _x8_soft + .globl _x8_soft _x8_soft: #else - .globl x8_soft + .globl x8_soft x8_soft: #endif - xorl %eax, %eax - movq %rdx, %rbx + xorl %eax, %eax + movq %rdx, %rbx movq %r8, %rsi - leaq (%rdx,%rcx,4), %r9 - leaq (%r9,%rcx,4), %r10 - leaq (%r10,%rcx,4), %r11 - leaq (%r11,%rcx,4), %r12 - leaq (%r12,%rcx,4), %r13 - leaq (%r13,%rcx,4), %r14 - leaq (%r14,%rcx,4), %r15 -X8_soft_loop: - movaps (%rsi), %xmm9 + leaq (%rdx,%rcx,4), %r9 + leaq (%r9,%rcx,4), %r10 + leaq (%r10,%rcx,4), %r11 + leaq (%r11,%rcx,4), %r12 + leaq (%r12,%rcx,4), %r13 + leaq (%r13,%rcx,4), %r14 + leaq (%r14,%rcx,4), %r15 +X8_soft_loop: + movaps (%rsi), %xmm9 movaps (%r10,%rax,4), %xmm6 - movaps %xmm9, %xmm11 + movaps %xmm9, %xmm11 movaps (%r11,%rax,4), %xmm7 - movaps 16(%rsi), %xmm8 - mulps %xmm6, %xmm11 - mulps %xmm7, %xmm9 - shufps $177, %xmm6, %xmm6 - mulps %xmm8, %xmm6 - shufps $177, %xmm7, %xmm7 - subps %xmm6, %xmm11 - mulps %xmm7, %xmm8 - movaps %xmm11, %xmm10 - addps %xmm8, %xmm9 - movaps 32(%rsi), %xmm15 - addps %xmm9, %xmm10 - subps %xmm9, %xmm11 - movaps (%rbx,%rax,4), %xmm5 - movaps %xmm15, %xmm6 + movaps 16(%rsi), %xmm8 + mulps %xmm6, %xmm11 + mulps %xmm7, %xmm9 + shufps $177, %xmm6, %xmm6 + mulps %xmm8, %xmm6 + shufps $177, %xmm7, %xmm7 + subps %xmm6, %xmm11 + mulps %xmm7, %xmm8 + movaps %xmm11, %xmm10 + addps %xmm8, %xmm9 + movaps 32(%rsi), %xmm15 + addps %xmm9, %xmm10 + subps %xmm9, %xmm11 + movaps (%rbx,%rax,4), %xmm5 + movaps %xmm15, %xmm6 movaps (%r12,%rax,4), %xmm12 - movaps %xmm5, %xmm2 + movaps %xmm5, %xmm2 movaps (%r14,%rax,4), %xmm13 - xorps %xmm3, %xmm11 #const - movaps 48(%rsi), %xmm14 - subps %xmm10, %xmm2 - mulps %xmm12, %xmm6 - addps %xmm10, %xmm5 - mulps %xmm13, %xmm15 - movaps 64(%rsi), %xmm10 - movaps %xmm5, %xmm0 - shufps $177, %xmm12, %xmm12 - shufps $177, %xmm13, %xmm13 - mulps %xmm14, %xmm12 - mulps %xmm13, %xmm14 - subps %xmm12, %xmm6 - addps %xmm14, %xmm15 - movaps (%r13,%rax,4), %xmm7 - movaps %xmm10, %xmm13 - movaps (%r15,%rax,4), %xmm8 - movaps %xmm6, %xmm12 - movaps 80(%rsi), %xmm9 - addq $96, %rsi - mulps %xmm7, %xmm13 - subps %xmm15, %xmm6 - addps %xmm15, %xmm12 - mulps %xmm8, %xmm10 - subps %xmm12, %xmm0 - addps %xmm12, %xmm5 - 
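	# Throughout these kernels, shufps $177 swaps the real/imaginary lanes
	# of each complex pair; combined with mulps/subps/addps it forms the
	# complex twiddle multiplies, and xorps against the sign mask held in
	# %xmm3 (first row of sse_constants, loaded in x_init) gives the
	# +/-i rotations at the instructions marked #const.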
shufps $177, %xmm7, %xmm7 - xorps %xmm3, %xmm6 #const - shufps $177, %xmm8, %xmm8 - movaps %xmm2, %xmm12 - mulps %xmm9, %xmm7 - mulps %xmm8, %xmm9 - subps %xmm7, %xmm13 - addps %xmm9, %xmm10 - movaps (%r9,%rax,4), %xmm4 - shufps $177, %xmm11, %xmm11 - movaps %xmm4, %xmm1 - shufps $177, %xmm6, %xmm6 - addps %xmm11, %xmm1 - subps %xmm11, %xmm4 - addps %xmm6, %xmm12 - subps %xmm6, %xmm2 - movaps %xmm13, %xmm11 - movaps %xmm4, %xmm14 - movaps %xmm1, %xmm6 - subps %xmm10, %xmm13 - addps %xmm10, %xmm11 - xorps %xmm3, %xmm13 #const - addps %xmm11, %xmm4 - subps %xmm11, %xmm14 - shufps $177, %xmm13, %xmm13 - movaps %xmm5, (%rbx,%rax,4) - movaps %xmm4, (%r9,%rax,4) - movaps %xmm2, (%r10,%rax,4) - subps %xmm13, %xmm1 - addps %xmm13, %xmm6 - movaps %xmm1, (%r11,%rax,4) - movaps %xmm0, (%r12,%rax,4) - movaps %xmm14, (%r13,%rax,4) - movaps %xmm12, (%r14,%rax,4) - movaps %xmm6, (%r15,%rax,4) - addq $4, %rax - cmpq %rcx, %rax + xorps %xmm3, %xmm11 #const + movaps 48(%rsi), %xmm14 + subps %xmm10, %xmm2 + mulps %xmm12, %xmm6 + addps %xmm10, %xmm5 + mulps %xmm13, %xmm15 + movaps 64(%rsi), %xmm10 + movaps %xmm5, %xmm0 + shufps $177, %xmm12, %xmm12 + shufps $177, %xmm13, %xmm13 + mulps %xmm14, %xmm12 + mulps %xmm13, %xmm14 + subps %xmm12, %xmm6 + addps %xmm14, %xmm15 + movaps (%r13,%rax,4), %xmm7 + movaps %xmm10, %xmm13 + movaps (%r15,%rax,4), %xmm8 + movaps %xmm6, %xmm12 + movaps 80(%rsi), %xmm9 + addq $96, %rsi + mulps %xmm7, %xmm13 + subps %xmm15, %xmm6 + addps %xmm15, %xmm12 + mulps %xmm8, %xmm10 + subps %xmm12, %xmm0 + addps %xmm12, %xmm5 + shufps $177, %xmm7, %xmm7 + xorps %xmm3, %xmm6 #const + shufps $177, %xmm8, %xmm8 + movaps %xmm2, %xmm12 + mulps %xmm9, %xmm7 + mulps %xmm8, %xmm9 + subps %xmm7, %xmm13 + addps %xmm9, %xmm10 + movaps (%r9,%rax,4), %xmm4 + shufps $177, %xmm11, %xmm11 + movaps %xmm4, %xmm1 + shufps $177, %xmm6, %xmm6 + addps %xmm11, %xmm1 + subps %xmm11, %xmm4 + addps %xmm6, %xmm12 + subps %xmm6, %xmm2 + movaps %xmm13, %xmm11 + movaps %xmm4, %xmm14 + movaps %xmm1, %xmm6 + subps %xmm10, %xmm13 + addps %xmm10, %xmm11 + xorps %xmm3, %xmm13 #const + addps %xmm11, %xmm4 + subps %xmm11, %xmm14 + shufps $177, %xmm13, %xmm13 + movaps %xmm5, (%rbx,%rax,4) + movaps %xmm4, (%r9,%rax,4) + movaps %xmm2, (%r10,%rax,4) + subps %xmm13, %xmm1 + addps %xmm13, %xmm6 + movaps %xmm1, (%r11,%rax,4) + movaps %xmm0, (%r12,%rax,4) + movaps %xmm14, (%r13,%rax,4) + movaps %xmm12, (%r14,%rax,4) + movaps %xmm6, (%r15,%rax,4) + addq $4, %rax + cmpq %rcx, %rax jne X8_soft_loop - ret + ret #ifdef __APPLE__ - .globl _x8_hard + .globl _x8_hard _x8_hard: #else - .globl x8_hard + .globl x8_hard x8_hard: #endif - movaps (%r9), %xmm5 -X8_loop: - movaps (%r8), %xmm9 + movaps (%r9), %xmm5 +X8_loop: + movaps (%r8), %xmm9 X8_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm6 - movaps %xmm9, %xmm11 + movaps 0xFECA(%rdx,%rax,4), %xmm6 + movaps %xmm9, %xmm11 X8_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm7 - movaps 16(%r8), %xmm8 - mulps %xmm6, %xmm11 - mulps %xmm7, %xmm9 - shufps $177, %xmm6, %xmm6 - mulps %xmm8, %xmm6 - shufps $177, %xmm7, %xmm7 - subps %xmm6, %xmm11 - mulps %xmm7, %xmm8 - movaps %xmm11, %xmm10 - addps %xmm8, %xmm9 - movaps 32(%r8), %xmm15 - addps %xmm9, %xmm10 - subps %xmm9, %xmm11 + movaps 0xFECA(%rdx,%rax,4), %xmm7 + movaps 16(%r8), %xmm8 + mulps %xmm6, %xmm11 + mulps %xmm7, %xmm9 + shufps $177, %xmm6, %xmm6 + mulps %xmm8, %xmm6 + shufps $177, %xmm7, %xmm7 + subps %xmm6, %xmm11 + mulps %xmm7, %xmm8 + movaps %xmm11, %xmm10 + addps %xmm8, %xmm9 + movaps 32(%r8), %xmm15 + addps %xmm9, %xmm10 + subps %xmm9, %xmm11 
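	# The 0xFECA displacements below are dummies: the X8_const_N labels
	# record where each displacement byte sits so the planner can patch in
	# real offsets when it emits the transform, the same scheme used for
	# the LEAF_*_const labels and the sse_leaf_*_offsets tables at the end
	# of this file.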
X8_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm3 - movaps %xmm15, %xmm6 + movaps 0xFECA(%rdx,%rax,4), %xmm3 + movaps %xmm15, %xmm6 X8_const_4: movaps 0xFECA(%rdx,%rax,4), %xmm12 - movaps %xmm3, %xmm2 + movaps %xmm3, %xmm2 X8_const_6: movaps 0xFECA(%rdx,%rax,4), %xmm13 - xorps %xmm5, %xmm11 - movaps 48(%r8), %xmm14 - subps %xmm10, %xmm2 - mulps %xmm12, %xmm6 - addps %xmm10, %xmm3 - mulps %xmm13, %xmm15 - movaps 64(%r8), %xmm10 - movaps %xmm3, %xmm0 - shufps $177, %xmm12, %xmm12 - shufps $177, %xmm13, %xmm13 - mulps %xmm14, %xmm12 - mulps %xmm13, %xmm14 - subps %xmm12, %xmm6 - addps %xmm14, %xmm15 + xorps %xmm5, %xmm11 + movaps 48(%r8), %xmm14 + subps %xmm10, %xmm2 + mulps %xmm12, %xmm6 + addps %xmm10, %xmm3 + mulps %xmm13, %xmm15 + movaps 64(%r8), %xmm10 + movaps %xmm3, %xmm0 + shufps $177, %xmm12, %xmm12 + shufps $177, %xmm13, %xmm13 + mulps %xmm14, %xmm12 + mulps %xmm13, %xmm14 + subps %xmm12, %xmm6 + addps %xmm14, %xmm15 X8_const_5: movaps 0xFECA(%rdx,%rax,4), %xmm7 - movaps %xmm10, %xmm13 + movaps %xmm10, %xmm13 X8_const_7: movaps 0xFECA(%rdx,%rax,4), %xmm8 - movaps %xmm6, %xmm12 - movaps 80(%r8), %xmm9 - addq $96, %r8 - mulps %xmm7, %xmm13 - subps %xmm15, %xmm6 - addps %xmm15, %xmm12 - mulps %xmm8, %xmm10 - subps %xmm12, %xmm0 - addps %xmm12, %xmm3 - shufps $177, %xmm7, %xmm7 - xorps %xmm5, %xmm6 - shufps $177, %xmm8, %xmm8 - movaps %xmm2, %xmm12 - mulps %xmm9, %xmm7 - mulps %xmm8, %xmm9 - subps %xmm7, %xmm13 - addps %xmm9, %xmm10 + movaps %xmm6, %xmm12 + movaps 80(%r8), %xmm9 + addq $96, %r8 + mulps %xmm7, %xmm13 + subps %xmm15, %xmm6 + addps %xmm15, %xmm12 + mulps %xmm8, %xmm10 + subps %xmm12, %xmm0 + addps %xmm12, %xmm3 + shufps $177, %xmm7, %xmm7 + xorps %xmm5, %xmm6 + shufps $177, %xmm8, %xmm8 + movaps %xmm2, %xmm12 + mulps %xmm9, %xmm7 + mulps %xmm8, %xmm9 + subps %xmm7, %xmm13 + addps %xmm9, %xmm10 X8_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm4 - shufps $177, %xmm11, %xmm11 - movaps %xmm4, %xmm1 - shufps $177, %xmm6, %xmm6 - addps %xmm11, %xmm1 - subps %xmm11, %xmm4 - addps %xmm6, %xmm12 - subps %xmm6, %xmm2 - movaps %xmm13, %xmm11 - movaps %xmm4, %xmm14 - movaps %xmm1, %xmm6 - subps %xmm10, %xmm13 - addps %xmm10, %xmm11 - xorps %xmm5, %xmm13 - addps %xmm11, %xmm4 - subps %xmm11, %xmm14 - shufps $177, %xmm13, %xmm13 + movaps 0xFECA(%rdx,%rax,4), %xmm4 + shufps $177, %xmm11, %xmm11 + movaps %xmm4, %xmm1 + shufps $177, %xmm6, %xmm6 + addps %xmm11, %xmm1 + subps %xmm11, %xmm4 + addps %xmm6, %xmm12 + subps %xmm6, %xmm2 + movaps %xmm13, %xmm11 + movaps %xmm4, %xmm14 + movaps %xmm1, %xmm6 + subps %xmm10, %xmm13 + addps %xmm10, %xmm11 + xorps %xmm5, %xmm13 + addps %xmm11, %xmm4 + subps %xmm11, %xmm14 + shufps $177, %xmm13, %xmm13 X8_const1_0: movaps %xmm3, 0xFECA(%rdx,%rax,4) X8_const1_1: movaps %xmm4, 0xFECA(%rdx,%rax,4) X8_const1_2: - movaps %xmm2, 0xFECA(%rdx,%rax,4) - subps %xmm13, %xmm1 - addps %xmm13, %xmm6 + movaps %xmm2, 0xFECA(%rdx,%rax,4) + subps %xmm13, %xmm1 + addps %xmm13, %xmm6 X8_const1_3: - movaps %xmm1, 0xFECA(%rdx,%rax,4) + movaps %xmm1, 0xFECA(%rdx,%rax,4) X8_const1_4: movaps %xmm0, 0xFECA(%rdx,%rax,4) X8_const1_5: movaps %xmm14, 0xFECA(%rdx,%rax,4) X8_const1_6: - movaps %xmm12, 0xFECA(%rdx,%rax,4) + movaps %xmm12, 0xFECA(%rdx,%rax,4) X8_const1_7: movaps %xmm6, 0xFECA(%rdx,%rax,4) - addq $4, %rax - cmpq %rcx, %rax + addq $4, %rax + cmpq %rcx, %rax jne X8_loop -#ifdef __APPLE__ - .globl _sse_leaf_ee_offsets - .globl _sse_leaf_oo_offsets - .globl _sse_leaf_eo_offsets - .globl _sse_leaf_oe_offsets - .align 4 +#ifdef __APPLE__ + .globl _sse_leaf_ee_offsets + .globl 
_sse_leaf_oo_offsets + .globl _sse_leaf_eo_offsets + .globl _sse_leaf_oe_offsets + .align 4 _sse_leaf_ee_offsets: - .long LEAF_EE_const_0-_leaf_ee+0x4 - .long LEAF_EE_const_1-_leaf_ee+0x5 - .long LEAF_EE_const_2-_leaf_ee+0x5 - .long LEAF_EE_const_3-_leaf_ee+0x5 - .long LEAF_EE_const_4-_leaf_ee+0x5 - .long LEAF_EE_const_5-_leaf_ee+0x5 - .long LEAF_EE_const_6-_leaf_ee+0x4 - .long LEAF_EE_const_7-_leaf_ee+0x5 + .long LEAF_EE_const_0-_leaf_ee+0x4 + .long LEAF_EE_const_1-_leaf_ee+0x5 + .long LEAF_EE_const_2-_leaf_ee+0x5 + .long LEAF_EE_const_3-_leaf_ee+0x5 + .long LEAF_EE_const_4-_leaf_ee+0x5 + .long LEAF_EE_const_5-_leaf_ee+0x5 + .long LEAF_EE_const_6-_leaf_ee+0x4 + .long LEAF_EE_const_7-_leaf_ee+0x5 _sse_leaf_oo_offsets: - .long LEAF_OO_const_0-_leaf_oo+0x4 - .long LEAF_OO_const_1-_leaf_oo+0x4 - .long LEAF_OO_const_2-_leaf_oo+0x5 - .long LEAF_OO_const_3-_leaf_oo+0x5 - .long LEAF_OO_const_4-_leaf_oo+0x4 - .long LEAF_OO_const_5-_leaf_oo+0x5 - .long LEAF_OO_const_6-_leaf_oo+0x5 - .long LEAF_OO_const_7-_leaf_oo+0x5 + .long LEAF_OO_const_0-_leaf_oo+0x4 + .long LEAF_OO_const_1-_leaf_oo+0x4 + .long LEAF_OO_const_2-_leaf_oo+0x5 + .long LEAF_OO_const_3-_leaf_oo+0x5 + .long LEAF_OO_const_4-_leaf_oo+0x4 + .long LEAF_OO_const_5-_leaf_oo+0x5 + .long LEAF_OO_const_6-_leaf_oo+0x5 + .long LEAF_OO_const_7-_leaf_oo+0x5 _sse_leaf_eo_offsets: - .long LEAF_EO_const_0-_leaf_eo+0x5 - .long LEAF_EO_const_1-_leaf_eo+0x4 - .long LEAF_EO_const_2-_leaf_eo+0x4 - .long LEAF_EO_const_3-_leaf_eo+0x4 - .long LEAF_EO_const_4-_leaf_eo+0x5 - .long LEAF_EO_const_5-_leaf_eo+0x5 - .long LEAF_EO_const_6-_leaf_eo+0x4 - .long LEAF_EO_const_7-_leaf_eo+0x5 + .long LEAF_EO_const_0-_leaf_eo+0x5 + .long LEAF_EO_const_1-_leaf_eo+0x4 + .long LEAF_EO_const_2-_leaf_eo+0x4 + .long LEAF_EO_const_3-_leaf_eo+0x4 + .long LEAF_EO_const_4-_leaf_eo+0x5 + .long LEAF_EO_const_5-_leaf_eo+0x5 + .long LEAF_EO_const_6-_leaf_eo+0x4 + .long LEAF_EO_const_7-_leaf_eo+0x5 _sse_leaf_oe_offsets: - .long LEAF_OE_const_0-_leaf_oe+0x5 - .long LEAF_OE_const_1-_leaf_oe+0x4 - .long LEAF_OE_const_2-_leaf_oe+0x4 - .long LEAF_OE_const_3-_leaf_oe+0x5 - .long LEAF_OE_const_4-_leaf_oe+0x5 - .long LEAF_OE_const_5-_leaf_oe+0x5 - .long LEAF_OE_const_6-_leaf_oe+0x4 - .long LEAF_OE_const_7-_leaf_oe+0x4 + .long LEAF_OE_const_0-_leaf_oe+0x5 + .long LEAF_OE_const_1-_leaf_oe+0x4 + .long LEAF_OE_const_2-_leaf_oe+0x4 + .long LEAF_OE_const_3-_leaf_oe+0x5 + .long LEAF_OE_const_4-_leaf_oe+0x5 + .long LEAF_OE_const_5-_leaf_oe+0x5 + .long LEAF_OE_const_6-_leaf_oe+0x4 + .long LEAF_OE_const_7-_leaf_oe+0x4 #else - .globl sse_leaf_ee_offsets - .globl sse_leaf_oo_offsets - .globl sse_leaf_eo_offsets - .globl sse_leaf_oe_offsets - .align 4 + .globl sse_leaf_ee_offsets + .globl sse_leaf_oo_offsets + .globl sse_leaf_eo_offsets + .globl sse_leaf_oe_offsets + .align 4 sse_leaf_ee_offsets: - .long LEAF_EE_const_0-leaf_ee+0x4 - .long LEAF_EE_const_1-leaf_ee+0x5 - .long LEAF_EE_const_2-leaf_ee+0x5 - .long LEAF_EE_const_3-leaf_ee+0x5 - .long LEAF_EE_const_4-leaf_ee+0x5 - .long LEAF_EE_const_5-leaf_ee+0x5 - .long LEAF_EE_const_6-leaf_ee+0x4 - .long LEAF_EE_const_7-leaf_ee+0x5 + .long LEAF_EE_const_0-leaf_ee+0x4 + .long LEAF_EE_const_1-leaf_ee+0x5 + .long LEAF_EE_const_2-leaf_ee+0x5 + .long LEAF_EE_const_3-leaf_ee+0x5 + .long LEAF_EE_const_4-leaf_ee+0x5 + .long LEAF_EE_const_5-leaf_ee+0x5 + .long LEAF_EE_const_6-leaf_ee+0x4 + .long LEAF_EE_const_7-leaf_ee+0x5 sse_leaf_oo_offsets: - .long LEAF_OO_const_0-leaf_oo+0x4 - .long LEAF_OO_const_1-leaf_oo+0x4 - .long LEAF_OO_const_2-leaf_oo+0x5 - .long 
LEAF_OO_const_3-leaf_oo+0x5 - .long LEAF_OO_const_4-leaf_oo+0x4 - .long LEAF_OO_const_5-leaf_oo+0x5 - .long LEAF_OO_const_6-leaf_oo+0x5 - .long LEAF_OO_const_7-leaf_oo+0x5 + .long LEAF_OO_const_0-leaf_oo+0x4 + .long LEAF_OO_const_1-leaf_oo+0x4 + .long LEAF_OO_const_2-leaf_oo+0x5 + .long LEAF_OO_const_3-leaf_oo+0x5 + .long LEAF_OO_const_4-leaf_oo+0x4 + .long LEAF_OO_const_5-leaf_oo+0x5 + .long LEAF_OO_const_6-leaf_oo+0x5 + .long LEAF_OO_const_7-leaf_oo+0x5 sse_leaf_eo_offsets: - .long LEAF_EO_const_0-leaf_eo+0x5 - .long LEAF_EO_const_1-leaf_eo+0x4 - .long LEAF_EO_const_2-leaf_eo+0x4 - .long LEAF_EO_const_3-leaf_eo+0x4 - .long LEAF_EO_const_4-leaf_eo+0x5 - .long LEAF_EO_const_5-leaf_eo+0x5 - .long LEAF_EO_const_6-leaf_eo+0x4 - .long LEAF_EO_const_7-leaf_eo+0x5 + .long LEAF_EO_const_0-leaf_eo+0x5 + .long LEAF_EO_const_1-leaf_eo+0x4 + .long LEAF_EO_const_2-leaf_eo+0x4 + .long LEAF_EO_const_3-leaf_eo+0x4 + .long LEAF_EO_const_4-leaf_eo+0x5 + .long LEAF_EO_const_5-leaf_eo+0x5 + .long LEAF_EO_const_6-leaf_eo+0x4 + .long LEAF_EO_const_7-leaf_eo+0x5 sse_leaf_oe_offsets: - .long LEAF_OE_const_0-leaf_oe+0x5 - .long LEAF_OE_const_1-leaf_oe+0x4 - .long LEAF_OE_const_2-leaf_oe+0x4 - .long LEAF_OE_const_3-leaf_oe+0x5 - .long LEAF_OE_const_4-leaf_oe+0x5 - .long LEAF_OE_const_5-leaf_oe+0x5 - .long LEAF_OE_const_6-leaf_oe+0x4 - .long LEAF_OE_const_7-leaf_oe+0x4 + .long LEAF_OE_const_0-leaf_oe+0x5 + .long LEAF_OE_const_1-leaf_oe+0x4 + .long LEAF_OE_const_2-leaf_oe+0x4 + .long LEAF_OE_const_3-leaf_oe+0x5 + .long LEAF_OE_const_4-leaf_oe+0x5 + .long LEAF_OE_const_5-leaf_oe+0x5 + .long LEAF_OE_const_6-leaf_oe+0x4 + .long LEAF_OE_const_7-leaf_oe+0x4 #endif #ifdef __APPLE__ - .data + .data #else - .section .data + .section .data #endif - .p2align 4 -#ifdef __APPLE__ - .globl _sse_constants + .p2align 4 +#ifdef __APPLE__ + .globl _sse_constants _sse_constants: #else - .globl sse_constants + .globl sse_constants sse_constants: #endif - .long 0x00000000,0x80000000,0x00000000,0x80000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0xbf3504f3,0x3f3504f3 -#ifdef __APPLE__ - .globl _sse_constants_inv + .long 0x00000000,0x80000000,0x00000000,0x80000000 + .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 + .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3 + .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 + .long 0x00000000,0x00000000,0xbf3504f3,0x3f3504f3 +#ifdef __APPLE__ + .globl _sse_constants_inv _sse_constants_inv: #else - .globl sse_constants_inv + .globl sse_constants_inv sse_constants_inv: #endif - .long 0x80000000,0x00000000,0x80000000,0x00000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0x3f3504f3,0xbf3504f3 + .long 0x80000000,0x00000000,0x80000000,0x00000000 + .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 + .long 0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3 + .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 + .long 0x00000000,0x00000000,0x3f3504f3,0xbf3504f3 -- cgit v1.1 From 22e709c024c85047b94b9a31cbdf0d2550606b2a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 15:34:58 +0200 Subject: Cleaning to make ISO C90 compatible --- include/ffts.h | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/include/ffts.h b/include/ffts.h index 63173bb..8e25cb4 
100644 --- a/include/ffts.h +++ b/include/ffts.h @@ -1,7 +1,7 @@ /* - + This file is part of FFTS. - + Copyright (c) 2012, Anthony M. Blake All rights reserved. @@ -29,19 +29,18 @@ */ -#ifndef __FFTS_H__ -#define __FFTS_H__ +#ifndef FFTS_H +#define FFTS_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif -#include -#include -#include -#include #include #ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ +extern "C" { +#endif #define POSITIVE_SIGN 1 #define NEGATIVE_SIGN -1 @@ -53,20 +52,20 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign); ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign); ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); -// For real transforms, sign == -1 implies a real-to-complex forwards tranform, -// and sign == 1 implies a complex-to-real backwards transform -// The output of a real-to-complex transform is N/2+1 complex numbers, where the -// redundant outputs have been omitted. +/* For real transforms, sign == -1 implies a real-to-complex forwards tranform, + and sign == 1 implies a complex-to-real backwards transform. + The output of a real-to-complex transform is N/2+1 complex numbers, + where the redundant outputs have been omitted. +*/ ffts_plan_t *ffts_init_1d_real(size_t N, int sign); ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign); ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); -void ffts_execute(ffts_plan_t * , const void *input, void *output); -void ffts_free(ffts_plan_t *); +void ffts_execute(ffts_plan_t *p, const void *input, void *output); +void ffts_free(ffts_plan_t *p); #ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - +} #endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + +#endif /* FFTS_H */ -- cgit v1.1 From be60ff68d4388c3f3034cd75029566beba5f9279 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 15:54:19 +0200 Subject: Add macro definitions for various function/variable attributes; aligned, inlining.. Cleaning of test application --- src/ffts_attributes.h | 77 ++++++++++++++++ tests/test.c | 249 ++++++++++++++++++++++++++------------------------ 2 files changed, 209 insertions(+), 117 deletions(-) create mode 100644 src/ffts_attributes.h diff --git a/src/ffts_attributes.h b/src/ffts_attributes.h new file mode 100644 index 0000000..6ac2ac3 --- /dev/null +++ b/src/ffts_attributes.h @@ -0,0 +1,77 @@ +/* + + This file is part of FFTS -- The Fastest Fourier Transform in the South + + Copyright (c) 2012, Anthony M. Blake + Copyright (c) 2012, The University of Waikato + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef FFTS_ATTRIBUTES_H +#define FFTS_ATTRIBUTES_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + +/* Macro definitions for various function/variable attributes */ +#ifdef __GNUC__ +#define GCC_VERSION_AT_LEAST(x,y) \ + (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y) +#else +#define GCC_VERSION_AT_LEAST(x,y) 0 +#endif + +#ifdef __GNUC__ +#define FFTS_ALIGN(x) __attribute__((aligned(x))) +#elif defined(_MSC_VER) +#define FFTS_ALIGN(x) __declspec(align(x)) +#else +#define FFTS_ALIGN(x) +#endif + +#if GCC_VERSION_AT_LEAST(3,1) +#define FFTS_ALWAYS_INLINE __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +#define FFTS_ALWAYS_INLINE __forceinline +#else +#define FFTS_ALWAYS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define FFTS_INLINE __inline +#else +#define FFTS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define FFTS_RESTRICT +#else +#define FFTS_RESTRICT __restrict +#endif + +#endif /* FFTS_ATTRIBUTES_H */ diff --git a/tests/test.c b/tests/test.c index 7ab79c6..b1e5509 100644 --- a/tests/test.c +++ b/tests/test.c @@ -1,7 +1,7 @@ /* - - This file is part of SFFT. - + + This file is part of FFTS. + Copyright (c) 2012, Anthony M. Blake All rights reserved. @@ -29,149 +29,164 @@ */ -#include -#include +#include "../include/ffts.h" +#include "../src/ffts_attributes.h" #ifdef __ARM_NEON__ #endif -#ifdef HAVE_SSE - #include -#endif -#include "../include/ffts.h" +#ifdef HAVE_SSE +#include +#endif +#include +#include +#include -#define PI 3.1415926535897932384626433832795028841971693993751058209 +#ifndef M_PI +#define M_PI 3.1415926535897932384626433832795028841971693993751058209 +#endif -float impulse_error(int N, int sign, float *data) { +static float impulse_error(int N, int sign, float *data) +{ #ifdef __ANDROID__ - double delta_sum = 0.0f; - double sum = 0.0f; + double delta_sum = 0.0f; + double sum = 0.0f; #else - long double delta_sum = 0.0f; - long double sum = 0.0f; -#endif + long double delta_sum = 0.0f; + long double sum = 0.0f; +#endif + int i; - int i; - for(i=0;i Date: Wed, 29 Oct 2014 15:59:56 +0200 Subject: "C++ style comments are not allowed in ISO C90" --- tests/test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test.c b/tests/test.c index b1e5509..9559095 100644 --- a/tests/test.c +++ b/tests/test.c @@ -151,7 +151,7 @@ int main(int argc, char *argv[]) input[2*i + 1] = 0.0f; } - // input[2] = 1.0f; + /* input[2] = 1.0f; */ p = ffts_init_1d(i, sign); if (!p) { -- cgit v1.1 From c602cee1b51e8c532e4817d41d973deea8ab257a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 16:13:33 +0200 Subject: Cleaning and reorganizing --- src/ffts_nd.c | 581 ++++++++++++++++++++++++++++++----------------------- src/ffts_nd.h | 36 +--- src/ffts_real.c | 452 +++++++++++++++++++++++------------------ src/ffts_real.h | 30 +-- src/ffts_real_nd.c | 432 ++++++++++++++++++++++++--------------- src/ffts_real_nd.h | 33 ++- src/ffts_small.c | 208 ++++++++++--------- src/ffts_small.h | 22 +- 8 files 
changed, 1015 insertions(+), 779 deletions(-) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 15fc4d1..f982403 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -1,33 +1,33 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ @@ -35,249 +35,330 @@ #ifdef HAVE_NEON #include "neon.h" +#include <arm_neon.h> +#endif + +#ifdef HAVE_SSE +#include <xmmintrin.h> #endif -void ffts_free_nd(ffts_plan_t *p) { - - int i; - for(i=0;i<p->rank;i++) { - - ffts_plan_t *x = p->plans[i]; - int k; - for(k=0;k<i;k++) { - if(p->Ms[i] == p->Ms[k]) x = NULL; - } - - if(x) ffts_free(x); - } - - free(p->Ns); - free(p->Ms); - free(p->plans); - free(p->buf); - free(p->transpose_buf); - free(p); -} #define TSIZE 8 -#include -void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) { - -#ifdef HAVE_NEON - size_t i,j,k; - int linebytes = w*8; - - for(j=0;jplans) { + int i; + + for (i = 0; i < p->rank; i++) { + ffts_plan_t *plan = p->plans[i]; + + if (plan) { + int k; + + for (k = 0; k < i; k++) { + if (p->Ms[i] == p->Ms[k]) { + plan = NULL; + break; + } + } + + if (plan) { + ffts_free(plan); + } + } + } + + free(p->plans); + } + + if (p->Ns) { + free(p->Ns); + } + + if (p->Ms) { + free(p->Ms); + } + + if (p->buf) { + ffts_aligned_free(p->buf); + } + + if (p->transpose_buf) { + ffts_aligned_free(p->transpose_buf); + } + + free(p); +} + +static void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) +{ +#ifdef HAVE_NEON + size_t i, j, k; + int linebytes = 8 * w; + + for (j = 0; j < h; j += 8) { + for (i = 0; i < w; i += 8) { + neon_transpose_to_buf(in + j*w + i, buf, w); + + uint64_t *p = out + i*h + j; + uint64_t *pbuf = buf; + uint64_t *ptemp; + + __asm__ __volatile__( + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" + "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" + "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" + "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" + "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" + "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" + "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" + "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" + "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" + "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" + "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" + "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" + "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" + "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" + "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" + "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" + "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" + "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" + "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "add %[p], %[p], %[w], lsl #3\n\t" + "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" + "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" + "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" + "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" + "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" + "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" + "mov %[ptemp], %[p]\n\t" + "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" + "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" + + : [p] "+r" (p), [pbuf] "+r" (pbuf), [ptemp] "+r" (ptemp) + : [w] "r" (w) + : "memory", "q8", "q9", "q10", "q11" + ); + + /* out[i*h + j] = in[j*w + i]; */ + } + }
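For reference, a scalar equivalent of the 8x8 tile that the NEON block above streams through q8-q15 (a sketch only; the helper name and signature are illustrative and not part of the patch):

static void transpose_tile_8x8(const uint64_t *in, uint64_t *out, int w, int h, int i, int j)
{
    int r, c;

    /* one tile of the blocked transpose: out[(i+c)*h + (j+r)] = in[(j+r)*w + (i+c)] */
    for (r = 0; r < 8; r++) {
        for (c = 0; c < 8; c++) {
            out[(i + c)*h + (j + r)] = in[(j + r)*w + (i + c)];
        }
    }
}

The assembly moves the tile two rows per load/store pair instead of element by element, advancing the output pointer by one row (w elements, lsl #3 for 8-byte words) between stores.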
#else #ifdef HAVE_SSE - uint64_t tmp[TSIZE*TSIZE] __attribute__((aligned(64))); - int tx, ty; - int x, y; - int tw = w / TSIZE; - int th = h / TSIZE; - for (ty=0;tybuf; + uint64_t *dout = (uint64_t*) out; + + ffts_plan_t *plan; + size_t i, j; - uint64_t *din = (uint64_t *)in; - uint64_t *buf = p->buf; - uint64_t *dout = (uint64_t *)out; + plan = p->plans[0]; + for (i = 0; i < p->Ns[0]; i++) { + plan->transform(plan, din + (i * p->Ms[0]), buf + (i * p->Ms[0])); + } - size_t i,j; - for(i=0;i<p->Ns[0];i++) { - p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * p->Ms[0])); - } - ffts_transpose(buf, dout, p->Ms[0], p->Ns[0], p->transpose_buf); + ffts_transpose(buf, dout, p->Ms[0], p->Ns[0], p->transpose_buf); - for(i=1;i<p->rank;i++) { - for(j=0;j<p->Ns[i];j++) { - p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); - } - ffts_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); - } + for (i = 1; i < p->rank; i++) { + plan = p->plans[i]; + + for (j = 0; j < p->Ns[i]; j++) { + plan->transform(plan, dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); + } + + ffts_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); + } } -ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign) { - size_t vol = 1; +ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign) +{ + ffts_plan_t *p; + size_t vol; + int i; + + p = calloc(1, sizeof(*p)); + if (!p) { + return NULL; + } + + p->transform = &ffts_execute_nd; + p->destroy = &ffts_free_nd; + p->rank = rank; + + p->Ms = malloc(rank * sizeof(*p->Ms)); + if (!p->Ms) { + goto cleanup; + } + + p->Ns = malloc(rank * sizeof(*p->Ns)); + if (!p->Ns) { + goto cleanup; + } + + vol = p->Ns[0] = Ns[0]; + for (i = 1; i < rank; i++) { + p->Ns[i] = Ns[i]; + vol *= Ns[i]; + } + + p->buf = ffts_aligned_malloc(2 * vol * sizeof(float)); + if (!p->buf) { + goto cleanup; + } + + p->transpose_buf = ffts_aligned_malloc(2 * 8 * 8 * sizeof(float)); + if (!p->transpose_buf) { + goto cleanup; + } - ffts_plan_t *p = malloc(sizeof(ffts_plan_t)); + p->plans = calloc(rank, sizeof(*p->plans)); + if (!p->plans) { + goto cleanup; + } - p->transform = &ffts_execute_nd; - p->destroy = &ffts_free_nd; + for (i = 0; i < rank; i++) { + int k; - p->rank = rank; - p->Ns = malloc(sizeof(size_t) * rank); - p->Ms = malloc(sizeof(size_t) * rank); - p->plans = malloc(sizeof(ffts_plan_t **) * rank); - int i; - for(i=0;i<rank;i++) { - p->Ns[i] = Ns[i]; - vol *= Ns[i]; - } - p->buf = valloc(sizeof(float) * 2 * vol); + p->Ms[i] = vol / p->Ns[i]; - for(i=0;i<rank;i++) { - p->Ms[i] = vol / p->Ns[i]; + for (k = 0; k < i; k++) { + if (p->Ms[k] == p->Ms[i]) { + p->plans[i] = p->plans[k]; + break; + } + } - p->plans[i] = NULL; - int k; - for(k=0;k<i;k++) { - if(p->Ms[k] == p->Ms[i]) - p->plans[i] = p->plans[k]; - } + if (!p->plans[i]) { + p->plans[i] = ffts_init_1d(p->Ms[i], sign); + if (!p->plans[i]) { + goto cleanup; + } + } + } - if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign); - } + return p; - p->transpose_buf = valloc(sizeof(float) * 2 * 8 * 8); - return p; +cleanup: + ffts_free_nd(p); + return NULL; } +ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign) +{ + size_t Ns[2]; -ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign) { - size_t Ns[2]; - Ns[0] = N1; - Ns[1] = N2; - return ffts_init_nd(2, Ns, sign); + Ns[0] = N1; + Ns[1] = N2; + return ffts_init_nd(2, Ns, sign); } -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
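For orientation between the file diffs: callers normally reach the interface set up by ffts_init_nd through the public wrappers in ffts.h, which dispatch to the transform/destroy members assigned above. A minimal sketch, with hypothetical sizes (both dimensions must be supported power-of-two lengths):

#include "ffts.h"

static void demo_2d(float *in, float *out)
{
    /* 64x64 forward complex 2-D FFT; -1 selects the forward direction */
    ffts_plan_t *p = ffts_init_2d(64, 64, -1);

    if (p) {
        ffts_execute(p, in, out);  /* in/out hold interleaved complex floats */
        ffts_free(p);
    }
}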
diff --git a/src/ffts_nd.h b/src/ffts_nd.h index a9af3e2..a960cad 100644 --- a/src/ffts_nd.h +++ b/src/ffts_nd.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,29 +31,13 @@ */ -#ifndef __FFTS_ND_H__ -#define __FFTS_ND_H__ - -#include <stdint.h> -#include <stddef.h> -#include <stdio.h> +#ifndef FFTS_ND_H +#define FFTS_ND_H #include "ffts.h" +#include <stddef.h> -#ifdef HAVE_NEON - #include <arm_neon.h> -#endif -#ifdef HAVE_SSE - #include <xmmintrin.h> -#endif - -void ffts_free_nd(ffts_plan_t *p); -void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf); - -void ffts_execute_nd(ffts_plan_t *p, const void * in, void * out); -ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); -ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign); - -#endif +ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); +ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign); -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_ND_H */ diff --git a/src/ffts_real.c b/src/ffts_real.c index 97ff942..77c57a0 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -1,227 +1,281 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. 
+* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "ffts_real.h" -void ffts_free_1d_real(ffts_plan_t *p) { - ffts_free(p->plans[0]); - free(p->A); - free(p->B); - free(p->plans); - free(p->buf); - free(p); -} +#ifdef HAVE_NEON +#include +#endif + +#ifdef HAVE_SSE +#include +#endif + +static void ffts_free_1d_real(ffts_plan_t *p) +{ + if (p->B) { + ffts_aligned_free(p->B); + } -void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) { - float *out = (float *)vout; - float *buf = (float *)p->buf; - float *A = p->A; - float *B = p->B; + if (p->A) { + ffts_aligned_free(p->A); + } - p->plans[0]->transform(p->plans[0], vin, buf); + if (p->buf) { + ffts_aligned_free(p->buf); + } - size_t N = p->N; - buf[N] = buf[0]; - buf[N+1] = buf[1]; + if (p->plans) { + ffts_free(p->plans[0]); + free(p->plans); + } - float *p_buf0 = buf; - float *p_buf1 = buf + N - 2; - float *p_out = out; + free(p); +} + +static void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) +{ + float *out = (float*) vout; + float *buf = (float*) p->buf; + float *A = p->A; + float *B = p->B; + size_t N = p->N; - size_t i; #ifdef __ARM_NEON__ - for(i=0;iplans[0]->transform(p->plans[0], vin, buf); - out[N] = buf[0] - buf[1]; - out[N+1] = 0.0f; + buf[N + 0] = buf[0]; + buf[N + 1] = buf[1]; +#ifdef __ARM_NEON__ + for (i = 0; i < N/2; i += 2) { + __asm__ __volatile__ ( + "vld1.32 {q8}, [%[pa]]!\n\t" + "vld1.32 {q9}, [%[pb]]!\n\t" + "vld1.32 {q10}, [%[buf0]]!\n\t" + "vld1.32 {q11}, [%[buf1]]\n\t" + "sub %[buf1], %[buf1], #16\n\t" + + "vdup.32 d26, d16[1]\n\t" + "vdup.32 d27, d17[1]\n\t" + "vdup.32 d24, d16[0]\n\t" + "vdup.32 d25, d17[0]\n\t" + + "vdup.32 d30, d23[1]\n\t" + "vdup.32 d31, d22[1]\n\t" + "vdup.32 d28, d23[0]\n\t" + "vdup.32 d29, d22[0]\n\t" + + "vmul.f32 q13, q13, q10\n\t" + "vmul.f32 q15, q15, q9\n\t" + "vmul.f32 q12, q12, q10\n\t" + "vmul.f32 q14, q14, q9\n\t" + "vrev64.f32 q13, q13\n\t" + "vrev64.f32 q15, q15\n\t" + + "vtrn.32 d26, d27\n\t" + "vtrn.32 d30, d31\n\t" + "vneg.f32 d26, d26\n\t" + "vneg.f32 d31, d31\n\t" + "vtrn.32 d26, d27\n\t" + "vtrn.32 d30, d31\n\t" + + "vadd.f32 q12, q12, q14\n\t" + "vadd.f32 q13, q13, q15\n\t" + "vadd.f32 q12, q12, q13\n\t" + "vst1.32 {q12}, [%[pout]]!\n\t" + : [pa] "+r" (A), [pb] "+r" (B), [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), + [pout] "+r" (p_out) + : + : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); + } +#else + for (i = 0; i < N/2; i++) { + out[2*i + 0] = buf[2*i + 0] * A[2*i] - buf[2*i + 1] * A[2*i + 1] + buf[N - 2*i] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; + out[2*i + 1] = buf[2*i + 1] * A[2*i] + buf[2*i + 0] * A[2*i + 1] + buf[N - 2*i] * 
B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; + + /* out[2*N-2*i+0] = out[2*i+0]; + out[2*N-2*i+1] = -out[2*i+1]; + */ + } +#endif + + out[N + 0] = buf[0] - buf[1]; + out[N + 1] = 0.0f; } -void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) { - float *out = (float *)vout; - float *in = (float *)vin; - float *buf = (float *)p->buf; - float *A = p->A; - float *B = p->B; - size_t N = p->N; +static void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) +{ + float *out = (float*) vout; + float *in = (float*) vin; + float *buf = (float*) p->buf; + float *A = p->A; + float *B = p->B; + size_t N = p->N; - float *p_buf0 = in; - float *p_buf1 = in + N - 2; +#ifdef __ARM_NEON__ + float *p_buf0 = in; + float *p_buf1 = in + N - 2; + float *p_out = buf; +#endif - float *p_out = buf; + size_t i; - size_t i; #ifdef __ARM_NEON__ - for(i=0;iplans[0]->transform(p->plans[0], buf, out); - -} - -ffts_plan_t *ffts_init_1d_real(size_t N, int sign) { - ffts_plan_t *p = malloc(sizeof(ffts_plan_t)); - - if(sign < 0) p->transform = &ffts_execute_1d_real; - else p->transform = &ffts_execute_1d_real_inv; - - p->destroy = &ffts_free_1d_real; - p->N = N; - p->rank = 1; - p->plans = malloc(sizeof(ffts_plan_t **) * 1); - - p->plans[0] = ffts_init_1d(N/2, sign); - - p->buf = valloc(sizeof(float) * 2 * ((N/2) + 1)); - - p->A = valloc(sizeof(float) * N); - p->B = valloc(sizeof(float) * N); - - if(sign < 0) { - int i; - for (i = 0; i < N/2; i++) { - p->A[2 * i] = 0.5 * (1.0 - sin (2.0f * PI / (double) (N) * (double) i)); - p->A[2 * i + 1] = 0.5 * (-1.0 * cos (2.0f * PI / (double) (N) * (double) i)); - p->B[2 * i] = 0.5 * (1.0 + sin (2.0f * PI / (double) (N) * (double) i)); - p->B[2 * i + 1] = 0.5 * (1.0 * cos (2.0f * PI / (double) (N) * (double) i)); - } - }else{ - int i; - for (i = 0; i < N/2; i++) { - p->A[2 * i] = 1.0 * (1.0 - sin (2.0f * PI / (double) (N) * (double) i)); - p->A[2 * i + 1] = 1.0 * (-1.0 * cos (2.0f * PI / (double) (N) * (double) i)); - p->B[2 * i] = 1.0 * (1.0 + sin (2.0f * PI / (double) (N) * (double) i)); - p->B[2 * i + 1] = 1.0 * (1.0 * cos (2.0f * PI / (double) (N) * (double) i)); - } - } - - return p; + p->plans[0]->transform(p->plans[0], buf, out); } - -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +ffts_plan_t *ffts_init_1d_real(size_t N, int sign) +{ + ffts_plan_t *p; + size_t i; + + p = (ffts_plan_t*) calloc(1, sizeof(*p)); + if (!p) { + return NULL; + } + + if (sign < 0) { + p->transform = &ffts_execute_1d_real; + } else { + p->transform = &ffts_execute_1d_real_inv; + } + + p->destroy = &ffts_free_1d_real; + p->N = N; + p->rank = 1; + + p->plans = (ffts_plan_t**) malloc(1 * sizeof(*p->plans)); + if (!p->plans) { + goto cleanup; + } + + p->plans[0] = ffts_init_1d(N/2, sign); + if (!p->plans[0]) { + goto cleanup; + } + + p->buf = ffts_aligned_malloc(2 * ((N/2) + 1) * sizeof(float)); + if (!p->buf) { + goto cleanup; + } + + p->A = (float*) ffts_aligned_malloc(N * sizeof(float)); + if (!p->A) { + goto cleanup; + } + + p->B = (float*) ffts_aligned_malloc(N * sizeof(float)); + if (!p->B) { + goto cleanup; + } + + if (sign < 0) { + for (i = 0; i < N/2; i++) { + p->A[2 * i + 0] = 0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i)); + p->A[2 * i + 1] = 0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + p->B[2 * i + 0] = 0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i)); + p->B[2 * i + 1] = 0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + } + } else { + for (i = 0; i < N/2; i++) { + p->A[2 * i + 0] = 1.0 * ( 1.0 - sin(2.0 
* M_PI / (double) N * (double) i)); + p->A[2 * i + 1] = 1.0 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + p->B[2 * i + 0] = 1.0 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i)); + p->B[2 * i + 1] = 1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + } + } + + return p; + +cleanup: + ffts_free_1d_real(p); + return NULL; +} \ No newline at end of file diff --git a/src/ffts_real.h b/src/ffts_real.h index d3f5316..81ca80f 100644 --- a/src/ffts_real.h +++ b/src/ffts_real.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,24 +31,16 @@ */ -#ifndef __FFTS_REAL_H__ -#define __FFTS_REAL_H__ +#ifndef FFTS_REAL_H +#define FFTS_REAL_H -#include <stdint.h> -#include <stddef.h> -#include <stdio.h> +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif #include "ffts.h" - -#ifdef HAVE_NEON - #include <arm_neon.h> -#endif -#ifdef HAVE_SSE - #include <xmmintrin.h> -#endif +#include <stddef.h> ffts_plan_t *ffts_init_1d_real(size_t N, int sign); -#endif - -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_REAL_H */ diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 151b72a..05bcc9c 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -32,199 +32,305 @@ */ #include "ffts_real_nd.h" +#include "ffts_real.h" #ifdef __ARM_NEON__ #include "neon.h" #endif -void ffts_free_nd_real(ffts_plan_t *p) { +#ifdef HAVE_NEON +#include <arm_neon.h> +#endif + +#ifdef HAVE_SSE +#include <xmmintrin.h> +#endif + +#include <stdlib.h> + +static void ffts_free_nd_real(ffts_plan_t *p) +{ + if (p->plans) { + int i; + + for (i = 0; i < p->rank; i++) { + ffts_plan_t *plan = p->plans[i]; + + if (plan) { + int j; + + for (j = i + 1; j < p->rank; j++) { + if (plan == p->plans[j]) { + p->plans[j] = NULL; + } + } - int i; - for(i=0;i<p->rank;i++) { + ffts_free(plan); + } + } - ffts_plan_t *x = p->plans[i]; + free(p->plans); + } - int k; - for(k=i+1;k<p->rank;k++) { - if(x == p->plans[k]) p->plans[k] = NULL; - } + if (p->transpose_buf) { + ffts_aligned_free(p->transpose_buf); + } - if(x) ffts_free(x); - } + if (p->buf) { + ffts_aligned_free(p->buf); + } - free(p->Ns); - free(p->Ms); - free(p->plans); - free(p->buf); - free(p->transpose_buf); - free(p); + if (p->Ns) { + free(p->Ns); + } + + if (p->Ms) { + free(p->Ms); + } + + free(p); }
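The forward-scan in the free path above exists because equal-sized dimensions share one sub-plan, so each distinct plan must be freed exactly once. Distilled to its essence as a sketch (the helper name is illustrative, not part of the patch):

static void free_distinct_plans(ffts_plan_t **plans, int rank)
{
    int i, j;

    for (i = 0; i < rank; i++) {
        if (plans[i]) {
            /* null out later aliases so the shared plan is freed only here */
            for (j = i + 1; j < rank; j++) {
                if (plans[j] == plans[i]) {
                    plans[j] = NULL;
                }
            }
            ffts_free(plans[i]);
        }
    }
}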
-void ffts_scalar_transpose(uint64_t *src, uint64_t *dst, int w, int h, uint64_t *buf) { - int const bw = 1; - int const bh = 8; - int i = 0, j = 0; - for (; i <= h-bh; i += bh) { - for (j = 0; j <= w-bw; j += bw) { - uint64_t const * ib = &src[w*i + j]; - uint64_t * ob = &dst[h*j + i]; - - uint64_t s_0_0 = ib[0*w+0]; - uint64_t s_1_0 = ib[1*w+0]; - uint64_t s_2_0 = ib[2*w+0]; - uint64_t s_3_0 = ib[3*w+0]; - uint64_t s_4_0 = ib[4*w+0]; - uint64_t s_5_0 = ib[5*w+0]; - uint64_t s_6_0 = ib[6*w+0]; - uint64_t s_7_0 = ib[7*w+0]; - - ob[0*h+0] = s_0_0; - ob[0*h+1] = s_1_0; - ob[0*h+2] = s_2_0; - ob[0*h+3] = s_3_0; - ob[0*h+4] = s_4_0; - ob[0*h+5] = s_5_0; - ob[0*h+6] = s_6_0; - ob[0*h+7] = s_7_0; - } - } - if (i < h) { - for (int i1 = 0; i1 < w; i1++) { - for (int j = i; j < h; j++) { - dst[i1*h + j] = src[j*w + i1]; - } - } - } - if (j < w) { - for (int i = j; i < w; i++) { - for (int j1 = 0; j1 < h; j1++) { - dst[i*h + j1] = src[j1*w + i]; - } - } - } } +static void ffts_scalar_transpose(uint64_t *src, uint64_t *dst, int w, int h, uint64_t *buf) +{ + const int bw = 1; + const int bh = 8; + int i = 0, j = 0; + + for (; i <= h - bh; i += bh) { + for (j = 0; j <= w - bw; j += bw) { + uint64_t const *ib = &src[w*i + j]; + uint64_t *ob = &dst[h*j + i]; + + uint64_t s_0_0 = ib[0*w + 0]; + uint64_t s_1_0 = ib[1*w + 0]; + uint64_t s_2_0 = ib[2*w + 0]; + uint64_t s_3_0 = ib[3*w + 0]; + uint64_t s_4_0 = ib[4*w + 0]; + uint64_t s_5_0 = ib[5*w + 0]; + uint64_t s_6_0 = ib[6*w + 0]; + uint64_t s_7_0 = ib[7*w + 0]; + + ob[0*h + 0] = s_0_0; + ob[0*h + 1] = s_1_0; + ob[0*h + 2] = s_2_0; + ob[0*h + 3] = s_3_0; + ob[0*h + 4] = s_4_0; + ob[0*h + 5] = s_5_0; + ob[0*h + 6] = s_6_0; + ob[0*h + 7] = s_7_0; + } + } + + if (i < h) { + int i1; + + for (i1 = 0; i1 < w; i1++) { + for (j = i; j < h; j++) { + dst[i1*h + j] = src[j*w + i1]; + } + } + } + + if (j < w) { + int j1; + + for (i = j; i < w; i++) { + for (j1 = 0; j1 < h; j1++) { + dst[i*h + j1] = src[j1*w + i]; + } + } + } } -void ffts_execute_nd_real(ffts_plan_t *p, const void * in, void * out) { +static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) +{ + const size_t Ms0 = p->Ms[0]; + const size_t Ns0 = p->Ns[0]; + + uint32_t *din = (uint32_t*) in; + uint64_t *buf = p->buf; + uint64_t *dout = (uint64_t*) out; + uint64_t *transpose_buf = (uint64_t*) p->transpose_buf; + + ffts_plan_t *plan; + size_t i, j; - uint32_t *din = (uint32_t *)in; - uint64_t *buf = p->buf; - uint64_t *dout = (uint64_t *)out; + plan = p->plans[0]; + for (i = 0; i < Ns0; i++) { + plan->transform(plan, din + (i * Ms0), buf + (i * (Ms0 / 2 + 1))); + } - size_t i,j; - for(i=0;i<p->Ns[0];i++) { - p->plans[0]->transform(p->plans[0], din + (i * p->Ms[0]), buf + (i * (p->Ms[0] / 2 + 1))); - } - ffts_scalar_transpose(buf, dout, p->Ms[0] / 2 + 1, p->Ns[0], p->transpose_buf); + ffts_scalar_transpose(buf, dout, Ms0 / 2 + 1, Ns0, transpose_buf); - for(i=1;i<p->rank;i++) { - for(j=0;j<p->Ns[i];j++) { - p->plans[i]->transform(p->plans[i], dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); - } - ffts_scalar_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); - } + for (i = 1; i < p->rank; i++) { + const size_t Ms = p->Ms[i]; + const size_t Ns = p->Ns[i]; + + plan = p->plans[i]; + + for (j = 0; j < Ns; j++) { + plan->transform(plan, dout + (j * Ms), buf + (j * Ms)); + } + + ffts_scalar_transpose(buf, dout, Ms, Ns, transpose_buf); + } }
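Why the first pass above writes with stride Ms0/2 + 1 while reading rows of Ms0 reals: a real-to-complex FFT of n points yields only n/2 + 1 distinct complex bins, the remainder being conjugate mirrors. A trivial sanity check of that packing, as a standalone sketch with a hypothetical row length:

#include <stdio.h>

int main(void)
{
    unsigned n = 8;           /* hypothetical row of 8 real samples */
    unsigned bins = n/2 + 1;  /* distinct complex outputs: 5 */

    printf("row of %u reals -> %u complex bins\n", n, bins);
    return 0;
}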
-void ffts_execute_nd_real_inv(ffts_plan_t *p, const void * in, void * out) { +static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) +{ + const size_t Ms0 = p->Ms[0]; + const size_t Ms1 = p->Ms[1]; + const size_t Ns0 = p->Ns[0]; + const size_t Ns1 = p->Ns[1]; + + uint64_t *din = (uint64_t*) in; + uint64_t *buf = p->buf; + uint64_t *buf2; + uint64_t *transpose_buf = (uint64_t*) p->transpose_buf; + float *doutr = (float*) out; - uint64_t *din = (uint64_t *)in; - uint64_t *buf = p->buf; - uint64_t *buf2; - uint64_t *dout = (uint64_t *)out; - size_t vol = 1; + ffts_plan_t *plan; + size_t vol; - float *bufr = (float *)(p->buf); - float *doutr = (float *)out; + size_t i, j; - size_t i,j; + vol = p->Ns[0]; + for (i = 1; i < p->rank; i++) { + vol *= p->Ns[i]; + } - for(i=0;i<p->rank;i++) { - vol *= p->Ns[i]; - } + buf2 = buf + vol; - buf2 = buf + vol; + ffts_scalar_transpose(din, buf, Ms0, Ns0, transpose_buf); - ffts_scalar_transpose(din, buf, p->Ms[0], p->Ns[0], p->transpose_buf); + plan = p->plans[0]; + for (i = 0; i < Ms0; i++) { + plan->transform(plan, buf + (i * Ns0), buf2 + (i * Ns0)); + } - for(i=0;i<p->Ms[0];i++) { - p->plans[0]->transform(p->plans[0], buf + (i * p->Ns[0]), buf2 + (i * p->Ns[0])); - } + ffts_scalar_transpose(buf2, buf, Ns0, Ms0, transpose_buf); - ffts_scalar_transpose(buf2, buf, p->Ns[0], p->Ms[0], p->transpose_buf); - for(j=0;j<p->Ms[1];j++) { - p->plans[1]->transform(p->plans[1], buf + (j * (p->Ms[0])), &doutr[j * p->Ns[1]]); - } + plan = p->plans[1]; + for (j = 0; j < Ms1; j++) { + plan->transform(plan, buf + (j * Ms0), &doutr[j * Ns1]); + } } -ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign) { - size_t vol = 1; - size_t bufsize; - - ffts_plan_t *p = malloc(sizeof(ffts_plan_t)); - - if(sign < 0) p->transform = &ffts_execute_nd_real; - else p->transform = &ffts_execute_nd_real_inv; - - p->destroy = &ffts_free_nd_real; - - p->rank = rank; - p->Ns = malloc(sizeof(size_t) * rank); - p->Ms = malloc(sizeof(size_t) * rank); - p->plans = malloc(sizeof(ffts_plan_t **) * rank); - int i; - for(i=0;i<rank;i++) { - p->Ns[i] = Ns[i]; - vol *= Ns[i]; - } - - //There is probably a prettier way of doing this, but it works.. - if(sign < 0) { - bufsize = 2 * vol; - } - else { - bufsize = 2 * (Ns[0] * ((vol / Ns[0]) / 2 + 1) + vol); - } - - p->buf = valloc(sizeof(float) * bufsize); - - for(i=0;i<rank;i++) { - p->Ms[i] = vol / p->Ns[i]; - - p->plans[i] = NULL; - int k; - - if(sign < 0) { - for(k=1;k<i;k++) { - if(p->Ms[k] == p->Ms[i]) p->plans[i] = p->plans[k]; - } - if(!i) p->plans[i] = ffts_init_1d_real(p->Ms[i], sign); - else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ms[i], sign); - }else{ - for(k=0;k<i;k++) { - if(p->Ns[k] == p->Ns[i]) p->plans[i] = p->plans[k]; - } - if(i==rank-1) p->plans[i] = ffts_init_1d_real(p->Ns[i], sign); - else if(!p->plans[i]) p->plans[i] = ffts_init_1d(p->Ns[i], sign); - } - } - if(sign < 0) { - for(i=1;i<rank;i++) { - p->Ns[i] = p->Ns[i] / 2 + 1; - } - }else{ - for(i=0;i<rank;i++) { - p->Ms[i] = p->Ms[i] / 2 + 1; - } - } - - p->transpose_buf = valloc(sizeof(float) * 2 * 8 * 8); - return p; +ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign) +{ + int i; + size_t vol = 1; + size_t bufsize; + ffts_plan_t *p; + + p = (ffts_plan_t*) calloc(1, sizeof(*p)); + if (!p) { + return NULL; + } + + if (sign < 0) { + p->transform = &ffts_execute_nd_real; + } else { + p->transform = &ffts_execute_nd_real_inv; + } + + p->destroy = &ffts_free_nd_real; + p->rank = rank; + + p->Ms = (size_t*) malloc(rank * sizeof(*p->Ms)); + if (!p->Ms) { + goto cleanup; + } + + p->Ns = (size_t*) malloc(rank * sizeof(*p->Ns)); + if (!p->Ns) { + goto cleanup; + } + + for (i = 0; i < rank; i++) { + p->Ns[i] = Ns[i]; + vol *= Ns[i]; + } + + /* there is probably a prettier way of doing this, but it works.. 
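- the inverse path also has to hold the padded half-spectrum rows alongside the full volume, hence the second formula: e.g. with hypothetical Ns = {8, 8}, vol = 64 and bufsize = 2 * (8 * (8/2 + 1) + 64) = 208 floats, versus 2 * vol = 128 floats for the forward case 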
*/ + if (sign < 0) { + bufsize = 2 * vol; + } else { + bufsize = 2 * (Ns[0] * ((vol / Ns[0]) / 2 + 1) + vol); + } + + p->buf = ffts_aligned_malloc(bufsize * sizeof(float)); + if (!p->buf) { + goto cleanup; + } + + p->transpose_buf = ffts_aligned_malloc(2 * 8 * 8 * sizeof(float)); + if (!p->transpose_buf) { + goto cleanup; + } + + p->plans = (ffts_plan_t**) calloc(rank, sizeof(*p->plans)); + if (!p->plans) { + goto cleanup; + } + + for (i = 0; i < rank; i++) { + int k; + + p->Ms[i] = vol / p->Ns[i]; + + if (sign < 0) { + if (!i) { + p->plans[i] = ffts_init_1d_real(p->Ms[i], sign); + } else { + for (k = 1; k < i; k++) { + if (p->Ms[k] == p->Ms[i]) { + p->plans[i] = p->plans[k]; + break; + } + } + + if (!p->plans[i]) { + p->plans[i] = ffts_init_1d(p->Ms[i], sign); + p->Ns[i] = p->Ns[i] / 2 + 1; + } + } + } else { + if (i == rank - 1) { + p->plans[i] = ffts_init_1d_real(p->Ns[i], sign); + } else { + for (k = 0; k < i; k++) { + if (p->Ns[k] == p->Ns[i]) { + p->plans[i] = p->plans[k]; + break; + } + } + + if (!p->plans[i]) { + p->plans[i] = ffts_init_1d(p->Ns[i], sign); + p->Ms[i] = p->Ms[i] / 2 + 1; + } + } + } + + if (!p->plans[i]) { + goto cleanup; + } + } + + return p; + +cleanup: + ffts_free_nd_real(p); + return NULL; } +ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign) +{ + size_t Ns[2]; -ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign) { - size_t Ns[2]; - Ns[0] = N1; - Ns[1] = N2; - return ffts_init_nd_real(2, Ns, sign); + Ns[0] = N1; + Ns[1] = N2; + return ffts_init_nd_real(2, Ns, sign); } -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: diff --git a/src/ffts_real_nd.h b/src/ffts_real_nd.h index bc8ed75..d23a002 100644 --- a/src/ffts_real_nd.h +++ b/src/ffts_real_nd.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,24 +31,17 @@ */ -#ifndef __FFTS_REAL_ND_H__ -#define __FFTS_REAL_ND_H__ +#ifndef FFTS_REAL_ND_H +#define FFTS_REAL_ND_H -#include <stdint.h> -#include <stddef.h> -#include <stdio.h> +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif -#include "ffts_nd.h" -#include "ffts_real.h" #include "ffts.h" +#include <stddef.h> -#ifdef HAVE_NEON - #include <arm_neon.h> -#endif -#ifdef HAVE_SSE - #include <xmmintrin.h> -#endif - -#endif +ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); +ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign); -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_REAL_ND_H */ \ No newline at end of file
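Between the two files, for orientation: the real N-D interface is driven the same way as the complex one, through the ffts.h wrappers. A minimal sketch with hypothetical sizes (the forward real transform produces the packed half-spectrum rather than a full complex plane):

#include "ffts.h"

static void demo_2d_real(float *in, float *out)
{
    /* 64x64 forward real-to-complex 2-D FFT */
    ffts_plan_t *p = ffts_init_2d_real(64, 64, -1);

    if (p) {
        ffts_execute(p, in, out);
        ffts_free(p);
    }
}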
diff --git a/src/ffts_small.c b/src/ffts_small.c index e53493c..6f700c6 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2013, Michael J. Cree + + Copyright (c) 2013, Michael J. Cree Copyright (c) 2012, 2013, Anthony M. Blake - + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,127 +31,153 @@ */ -#include "ffts.h" +#include "ffts_small.h" #include "macros.h" #include -#define DEBUG(x) - -#include "ffts_small.h" - - void firstpass_16_f(ffts_plan_t * p, const void * in, void * out) +void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; - V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15; - float *LUT8 = p->ws; - - L_4_4(0, din+0,din+16,din+8,din+24,&r0_1,&r2_3,&r8_9,&r10_11); - L_2_4(0, din+4,din+20,din+28,din+12,&r4_5,&r6_7,&r14_15,&r12_13); - K_N(0, VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7); - K_N(0, VLD(LUT8+8),VLD(LUT8+12),&r0_1,&r4_5,&r8_9,&r12_13); - S_4(r0_1,r4_5,r8_9,r12_13,dout+0,dout+8,dout+16,dout+24); - K_N(0, VLD(LUT8+16),VLD(LUT8+20),&r2_3,&r6_7,&r10_11,&r14_15); - S_4(r2_3,r6_7,r10_11,r14_15,dout+4,dout+12,dout+20,dout+28); + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; + float *LUT8 = (float*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); } - void firstpass_16_b(ffts_plan_t * p, const void * in, void * out) +void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; - V r0_1,r2_3,r4_5,r6_7,r8_9,r10_11,r12_13,r14_15; - float *LUT8 = p->ws; - - L_4_4(1, din+0,din+16,din+8,din+24,&r0_1,&r2_3,&r8_9,&r10_11); - L_2_4(1, din+4,din+20,din+28,din+12,&r4_5,&r6_7,&r14_15,&r12_13); - K_N(1, VLD(LUT8),VLD(LUT8+4),&r0_1,&r2_3,&r4_5,&r6_7); - K_N(1, VLD(LUT8+8),VLD(LUT8+12),&r0_1,&r4_5,&r8_9,&r12_13); - S_4(r0_1,r4_5,r8_9,r12_13,dout+0,dout+8,dout+16,dout+24); - K_N(1, VLD(LUT8+16),VLD(LUT8+20),&r2_3,&r6_7,&r10_11,&r14_15); - S_4(r2_3,r6_7,r10_11,r14_15,dout+4,dout+12,dout+20,dout+28); -} + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; + float *LUT8 = (float*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +} - void firstpass_8_f(ffts_plan_t *p, const void *in, void *out) +void ffts_firstpass_8_f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; V r0_1, r2_3, r4_5, r6_7; - float *LUT8 = p->ws + p->ws_is[0]; + float *LUT8 = (float*) p->ws + p->ws_is[0]; L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1,r2_3,r4_5,r6_7,dout+0,dout+4,dout+8,dout+12); + 
S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); } - void firstpass_8_b(ffts_plan_t *p, const void *in, void *out) +void ffts_firstpass_8_b(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; V r0_1, r2_3, r4_5, r6_7; - float *LUT8 = p->ws + p->ws_is[0]; + float *LUT8 = (float*) p->ws + p->ws_is[0]; L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1,r2_3,r4_5,r6_7,dout+0,dout+4,dout+8,dout+12); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); } - - void firstpass_4_f(ffts_plan_t *p, const void *in, void *out) +void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; cdata_t t0, t1, t2, t3, t4, t5, t6, t7; - t0[0] = din[0]; t0[1] = din[1]; - t1[0] = din[4]; t1[1] = din[5]; - t2[0] = din[2]; t2[1] = din[3]; - t3[0] = din[6]; t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] + t7[1]; dout[3] = t5[1] - t7[0]; - dout[6] = t5[0] - t7[1]; dout[7] = t5[1] + t7[0]; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] + t7[1]; + dout[3] = t5[1] - t7[0]; + dout[6] = t5[0] - t7[1]; + dout[7] = t5[1] + t7[0]; } - void firstpass_4_b(ffts_plan_t *p, const void *in, void *out) +void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; cdata_t t0, t1, t2, t3, t4, t5, t6, t7; - t0[0] = din[0]; t0[1] = din[1]; - t1[0] = din[4]; t1[1] = din[5]; - t2[0] = din[2]; t2[1] = din[3]; - t3[0] = din[6]; t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] - t7[1]; dout[3] = t5[1] + t7[0]; - dout[6] = t5[0] + t7[1]; dout[7] = t5[1] - t7[0]; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] - t7[1]; + dout[3] = t5[1] + t7[0]; + dout[6] = t5[0] + t7[1]; + dout[7] = t5[1] 
- t7[0]; } - void firstpass_2(ffts_plan_t *p, const void *in, void *out) +void ffts_firstpass_2(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t *)in; - data_t *dout = (data_t *)out; - cdata_t t0, t1, r0,r1; - t0[0] = din[0]; t0[1] = din[1]; - t1[0] = din[2]; t1[1] = din[3]; + const data_t *din = (const data_t*) in; + data_t *dout = (data_t*) out; + cdata_t t0, t1, r0, r1; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[2]; + t1[1] = din[3]; + r0[0] = t0[0] + t1[0]; r0[1] = t0[1] + t1[1]; r1[0] = t0[0] - t1[0]; r1[1] = t0[1] - t1[1]; - dout[0] = r0[0]; dout[1] = r0[1]; - dout[2] = r1[0]; dout[3] = r1[1]; -} -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + + dout[0] = r0[0]; + dout[1] = r0[1]; + dout[2] = r1[0]; + dout[3] = r1[1]; +} \ No newline at end of file diff --git a/src/ffts_small.h b/src/ffts_small.h index 683537a..5ae48cc 100644 --- a/src/ffts_small.h +++ b/src/ffts_small.h @@ -1,14 +1,14 @@ -#ifndef __FFTS_SMALL_H__ -#define __FFTS_SMALL_H__ +#ifndef FFTS_SMALL_H +#define FFTS_SMALL_H +#include "ffts.h" -void firstpass_16_f(ffts_plan_t * p, const void * in, void * out); -void firstpass_16_b(ffts_plan_t * p, const void * in, void * out); -void firstpass_8_f(ffts_plan_t * p, const void * in, void * out); -void firstpass_8_b(ffts_plan_t * p, const void * in, void * out); -void firstpass_4_f(ffts_plan_t * p, const void * in, void * out); -void firstpass_4_b(ffts_plan_t * p, const void * in, void * out); -void firstpass_2(ffts_plan_t * p, const void * in, void * out); +void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_8_f(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_8_b(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out); +void ffts_firstpass_2(ffts_plan_t *p, const void *in, void *out); -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_SMALL_H */ -- cgit v1.1 From f8931f2f37f79f36d1b8ad8e4d6908e6bdfcaf4f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 16:18:11 +0200 Subject: MSVC has problems with "complex" type --- src/types.h | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/types.h b/src/types.h index a7425c2..749d387 100644 --- a/src/types.h +++ b/src/types.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,20 +31,19 @@ */ +#ifndef FFTS_TYPES_H +#define FFTS_TYPES_H -#ifndef __TYPES_H__ -#define __TYPES_H__ - -#define __INLINE static inline __attribute__((always_inline)) - -#if defined(complex) - typedef complex float cdata_t; -#else - typedef float cdata_t[2]; +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once #endif - typedef float data_t; +#if defined(_Complex_I) && defined(complex) && defined(I) +typedef complex float cdata_t; +#else +typedef float cdata_t[2]; #endif +typedef float data_t; -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_TYPES_H */ -- cgit v1.1 From d7a62e9eda3d64c5f0d4c1e8f767ca698b7a8df2 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 29 Oct 2014 16:30:06 +0200 Subject: Patterns doesn't depend on plan --- src/patterns.c | 333 ++++++++++++++++++++++++++++----------------------------- src/patterns.h | 25 ++--- 2 files changed, 174 insertions(+), 184 deletions(-) diff --git a/src/patterns.c b/src/patterns.c index 16d2301..fa37595 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -1,209 +1,198 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. 
+* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "patterns.h" -void permute_addr(int N, int offset, int stride, int *d) { - int i, a[4] = {0,2,1,3}; - for(i=0;i<4;i++) { +#include +#include +#include +#include +#include + +static void ffts_permute_addr(int N, int offset, int stride, int *d) +{ + int a[4] = {0,2,1,3}; + int i; + + for (i = 0; i < 4; i++) { d[i] = offset + (a[i] << stride); - if(d[i] < 0) d[i] += N; + if (d[i] < 0) { + d[i] += N; + } } } -void ffts_hardcodedleaf_is_rec(ptrdiff_t **is, int bigN, int N, int poffset, int offset, int stride, int even, int VL) { - - if(N > 4) { - ffts_hardcodedleaf_is_rec(is, bigN, N/2, poffset, offset, stride + 1, even, VL); - if(N/4 >= 4) ffts_hardcodedleaf_is_rec(is, bigN, N/4, poffset+(1<= 4) ffts_hardcodedleaf_is_rec(is, bigN, N/4, poffset-(1< 4) { + ffts_hardcodedleaf_is_rec(is, big_N, N/2, poffset, offset, stride + 1, even, VL); -void ffts_init_is(ffts_plan_t *p, int N, int leafN, int VL) { - int i, i0 = N/leafN/3+1, i1=N/leafN/3, i2 = N/leafN/3; - int stride = log(N/leafN)/log(2); - - p->is = malloc(N/VL * sizeof(ptrdiff_t)); - - ptrdiff_t *is = p->is; + if (N/4 >= 4) { + ffts_hardcodedleaf_is_rec(is, big_N, N/4, poffset + (1 << stride), offset + (N/2), stride + 2, 0, VL); + ffts_hardcodedleaf_is_rec(is, big_N, N/4, poffset - (1 << stride), offset + (3*N/4), stride + 2, 0, VL); + } else { + int temp = poffset + (1 << stride); - if((N/leafN) % 3 > 1) i1++; + if (temp < 0) { + temp += big_N; + } - for(i=0;iis[i]); -// if(i % 16 == 15) printf("\n"); -//} + (*is)[0] = poffset + (1 << stride); + (*is)[1] = poffset + (1 << stride) + (1 << (stride + 2)); + (*is)[2] = poffset - (1 << stride); + (*is)[3] = poffset - (1 << stride) + (1 << (stride + 2)); - p->i0 = i0; p->i1 = i1; -} -/** - * - * - */ -void ffts_elaborate_offsets(ptrdiff_t *offsets, int leafN, int N, int ioffset, int ooffset, int stride, int even) { - if((even && N == leafN) || (!even && N <= leafN)) { - offsets[2*(ooffset/leafN)] = ioffset*2; - offsets[2*(ooffset/leafN)+1] = ooffset; - }else if(N > 4) { - ffts_elaborate_offsets(offsets, leafN, N/2, ioffset, ooffset, stride+1, even); - ffts_elaborate_offsets(offsets, leafN, N/4, ioffset+(1<= leafN) - ffts_elaborate_offsets(offsets, leafN, N/4, ioffset-(1< 1) { + i1++; + } + + p = is; + for (i = 0; i < i0; i++) { + ffts_hardcodedleaf_is_rec(&p, N, leaf_N, i, 0, stride, 1, VL); + } -void ffts_init_offsets(ffts_plan_t *p, int N, int leafN) { + for (i = i0; i < i0 + i1; i++) { + ffts_hardcodedleaf_is_rec(&p, N, leaf_N / 2, i, 0, stride+1, 1, VL); + ffts_hardcodedleaf_is_rec(&p, N, leaf_N / 2, i - (1 << stride), 0, stride + 1, 1, VL); + } - ptrdiff_t 
*offsets = malloc(2 * N/leafN * sizeof(ptrdiff_t)); + for (i = 0 - i2; i < 0; i++) { + ffts_hardcodedleaf_is_rec(&p, N, leaf_N, i, 0, stride, 1, VL); + } - ffts_elaborate_offsets(offsets, leafN, N, 0, 0, 1, 1); + return is; +} - size_t i; - for(i=0;i<2*N/leafN;i+=2) { - if(offsets[i] < 0) offsets[i] = N + offsets[i]; - } - - qsort(offsets, N/leafN, 2 * sizeof(ptrdiff_t), compare_offsets); - //elaborate_is(p, N, 0, 0, 1); - p->offsets = malloc(N/leafN * sizeof(ptrdiff_t)); - for(i=0;ioffsets[i] = offsets[i*2+1]*2; - } -//for(i=0;ioffsets[i], reverse_bits(p->offsets[i], __builtin_ctzl(2*N))); -//} - free(offsets); +static void ffts_elaborate_offsets(ptrdiff_t *offsets, int leafN, int N, int ioffset, int ooffset, int stride, int even) +{ + if ((even && N == leafN) || (!even && N <= leafN)) { + offsets[2 * (ooffset / leafN) + 0] = ioffset * 2; + offsets[2 * (ooffset / leafN) + 1] = ooffset; + } else if (N > 4) { + ffts_elaborate_offsets(offsets, leafN, N/2, ioffset, ooffset, stride + 1, even); + ffts_elaborate_offsets(offsets, leafN, N/4, ioffset + (1<= leafN) { + ffts_elaborate_offsets(offsets, leafN, N/4, ioffset - (1< INT_MIN && diff < INT_MAX); + return (int) diff; } -void elaborate_tree(transform_index_t **p, int N, int leafN, int offset) { - - if(N <= leafN) return; - elaborate_tree(p, N/4, leafN, offset); - elaborate_tree(p, N/8, leafN, offset + N/4); - elaborate_tree(p, N/8, leafN, offset + N/4 + N/8); - elaborate_tree(p, N/4, leafN, offset + N/2); - elaborate_tree(p, N/4, leafN, offset + 3*N/4); +ptrdiff_t *ffts_init_offsets(int N, int leaf_N) +{ + ptrdiff_t *offsets, *tmp; + size_t i; - (*p)[0] = N; - (*p)[1] = offset*2; + offsets = malloc(N/leaf_N * sizeof(*offsets)); + if (!offsets) { + return NULL; + } - (*p)+=2; -} + tmp = malloc(2 * N/leaf_N * sizeof(*tmp)); + if (!tmp) { + free(offsets); + return NULL; + } -void ffts_init_tree(ffts_plan_t *p, int N, int leafN) { + ffts_elaborate_offsets(tmp, leaf_N, N, 0, 0, 1, 1); - int count = tree_count(N, leafN, 0) + 1; - transform_index_t *ps = p->transforms = malloc(count * 2 * sizeof(transform_index_t)); + for (i = 0; i < 2*N/leaf_N; i += 2) { + if (tmp[i] < 0) { + tmp[i] = N + tmp[i]; + } + } -//printf("count = %d\n", count); + qsort(tmp, N/leaf_N, 2 * sizeof(*tmp), ffts_compare_offsets); - elaborate_tree(&ps, N, leafN, 0); - #ifdef __ARM_NEON__ - ps -= 2; - #endif - ps[0] = 0; - ps[1] = 0; -//int i; -//for(i=0;itransforms[i*2], p->transforms[i*2+1], -// __builtin_ctzl(p->transforms[i*2]) - 5); -//} + for (i = 0; i < N/leaf_N; i++) { + offsets[i] = 2 * tmp[2*i + 1]; + } -} -*/ -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + free(tmp); + return offsets; +} \ No newline at end of file diff --git a/src/patterns.h b/src/patterns.h index 699a0e5..680c6e0 100644 --- a/src/patterns.h +++ b/src/patterns.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,15 +31,16 @@ */ +#ifndef FFTS_PATTERNS_H +#define FFTS_PATTERNS_H -#ifndef __PATTERNS_H__ -#define __PATTERNS_H__ +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif -#include "ffts.h" +#include -void ffts_init_is(ffts_plan_t *p, int N, int leafN, int VL); -void ffts_init_offsets(ffts_plan_t *p, int N, int leafN); -//void ffts_init_tree(ffts_plan_t *p, int N, int leafN); +ptrdiff_t *ffts_init_is(int N, int leaf_N, int VL); +ptrdiff_t *ffts_init_offsets(int N, int leaf_N); -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_PATTERNS_H */ -- cgit v1.1 From 7b999686ec4c732d28efd344065606fccba84ae4 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 31 Oct 2014 17:51:05 +0200 Subject: More consistent naming --- src/patterns.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/patterns.c b/src/patterns.c index fa37595..f748c48 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -111,7 +111,7 @@ ptrdiff_t *ffts_init_is(int N, int leaf_N, int VL) { int i, i0, i1, i2; int stride = (int) (log(N/leaf_N) / log(2)); - ptrdiff_t *is, *p; + ptrdiff_t *is, *pis; is = malloc(N / VL * sizeof(*is)); if (!is) { @@ -124,18 +124,18 @@ ptrdiff_t *ffts_init_is(int N, int leaf_N, int VL) i1++; } - p = is; + pis = is; for (i = 0; i < i0; i++) { - ffts_hardcodedleaf_is_rec(&p, N, leaf_N, i, 0, stride, 1, VL); + ffts_hardcodedleaf_is_rec(&pis, N, leaf_N, i, 0, stride, 1, VL); } for (i = i0; i < i0 + i1; i++) { - ffts_hardcodedleaf_is_rec(&p, N, leaf_N / 2, i, 0, stride+1, 1, VL); - ffts_hardcodedleaf_is_rec(&p, N, leaf_N / 2, i - (1 << stride), 0, stride + 1, 1, VL); + ffts_hardcodedleaf_is_rec(&pis, N, leaf_N / 2, i, 0, stride + 1, 1, VL); + ffts_hardcodedleaf_is_rec(&pis, N, leaf_N / 2, i - (1 << stride), 0, stride + 1, 1, VL); } for (i = 0 - i2; i < 0; i++) { - ffts_hardcodedleaf_is_rec(&p, N, leaf_N, i, 0, stride, 1, VL); + ffts_hardcodedleaf_is_rec(&pis, N, leaf_N, i, 0, stride, 1, VL); } return is; -- cgit v1.1 From 196fb0c0c1541cf1ec1b5e9ff8ac0e8109fde29c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 31 Oct 2014 17:55:21 +0200 Subject: Add CMake as an alternative build system Add support for Windows x64 (requires YASM) --- CMakeLists.txt | 158 ++++++ src/codegen.c | 1501 +++++++++++++++++++++++++++++++---------------------- src/codegen.h | 27 +- src/codegen_sse.h | 240 +++++---- src/ffts.c | 940 ++++++++++++++++++++------------- src/ffts.h | 273 +++++----- src/macros-sse.h | 44 +- src/macros.h | 167 +++--- src/sse.s | 13 +- src/sse_win64.s | 840 ++++++++++++++++++++++++++++++ 10 files changed, 2878 insertions(+), 1325 deletions(-) create mode 100644 CMakeLists.txt create mode 100644 src/sse_win64.s diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..365ec32 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,158 @@ +cmake_minimum_required(VERSION 2.8) + +project(ffts C ASM) + +set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) +set_property(GLOBAL PROPERTY USE_FOLDERS ON) + +# default build type is Debug which means no optimization +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") +endif(NOT CMAKE_BUILD_TYPE) + +# common options +option(ENABLE_SSE + "Enables the use of SSE instructions." ON +) + +option(ENABLE_NEON + "Enables the use of NEON instructions." OFF +) + +option(ENABLE_VFP + "Enables the use of VFP instructions." 
OFF +) + +option(DISABLE_DYNAMIC_CODE + "Disables the use of dynamic machine code generation." OFF +) + +option(ENABLE_SHARED + "Enable building a shared library." OFF +) + +#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -pipe -Wall") +add_definitions(-DFFTS_CMAKE_GENERATED) + +include(CheckIncludeFile) +include(CheckLibraryExists) + +if(MSVC) + add_definitions(-D_USE_MATH_DEFINES) +else() + # some systems need libm for some of the math functions to work + check_library_exists(m pow "" HAVE_LIBM) + if(HAVE_LIBM) + list(APPEND CMAKE_REQUIRED_LIBRARIES m) + list(APPEND FFTS_EXTRA_LIBRARIES m) + endif(HAVE_LIBM) +endif(MSVC) + +include_directories(src) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +set(FFTS_SOURCES + src/ffts_attributes.h + src/ffts.c + src/ffts.h + src/ffts_nd.c + src/ffts_nd.h + src/ffts_real.h + src/ffts_real.c + src/ffts_real_nd.c + src/ffts_real_nd.h + src/ffts_small.c + src/macros.h + src/patterns.c + src/patterns.h + src/types.h +) + +if(ENABLE_SSE) + list(APPEND FFTS_SOURCES + src/macros-sse.h + ) + + if(MSVC) + set(CMAKE_ASM-ATT_COMPILER yasm) + enable_language(ASM-ATT) + + add_custom_command( + OUTPUT sse_win64.obj + COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 + -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas + ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Generating sse_win64.obj" + ) + + list(APPEND FFTS_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj + src/sse_win64.s + ) + else() + list(APPEND FFTS_SOURCES + src/sse.s + ) + endif(MSVC) + + add_definitions(-D_USE_MATH_DEFINES) + add_definitions(-D__x86_64__) + add_definitions(-DHAVE_SSE -msse2) +endif() + +if(ENABLE_NEON) + if(DISABLE_DYNAMIC_CODE) + list(APPEND FFTS_SOURCES + source/neon_static_f.s + source/neon_static_i.s + ) + else() + list(APPEND FFTS_SOURCES + source/neon.s + source/arch/neon.c + ) + endif() + + add_definitions(-DHAVE_NEON) +endif() + +if(ENABLE_VFP) + list(APPEND FFTS_SOURCES + source/vfp.s + source/arch/vfp.c + ) + + add_definitions(-DHAVE_VFP) +endif() + +if(ENABLE_SINGLE) + add_definitions(-DHAVE_SINGLE) +endif() + +if(DISABLE_DYNAMIC_CODE) + list(APPEND FFTS_SOURCES + src/ffts_static.c + ) + + add_definitions(-DDYNAMIC_DISABLED) +else() + list(APPEND FFTS_SOURCES + src/codegen.c + src/codegen.h + ) +endif() + +add_library(ffts_static + ${FFTS_SOURCES} +) + +add_executable(ffts_test + tests/test.c +) + +target_link_libraries(ffts_test + ffts_static + ${FFTS_EXTRA_LIBRARIES} +) \ No newline at end of file diff --git a/src/codegen.c b/src/codegen.c index 79aaca6..0cc3d24 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -35,698 +35,951 @@ #include "macros.h" #include "ffts.h" -#ifdef __APPLE__ - #include -#endif - -#include -#include - #ifdef HAVE_NEON - #include "codegen_arm.h" - #include "neon.h" +#include "codegen_arm.h" +#include "neon.h" #elif HAVE_VFP - #include "codegen_arm.h" - #include "vfp.h" +#include "codegen_arm.h" +#include "vfp.h" #else - #include "codegen_sse.h" - #include "macros-sse.h" +#include "codegen_sse.h" +#include "macros-sse.h" #endif +#include +#include +#include +/* #include */ +#include +#include + #ifdef __ANDROID__ - #include +#include #endif -int tree_count(int N, int leafN, int offset) { - - if(N <= leafN) return 0; - int count = 0; - count += tree_count(N/4, leafN, offset); - count += tree_count(N/8, leafN, offset + N/4); - count += tree_count(N/8, leafN, offset + N/4 + N/8); - count += tree_count(N/4, leafN, offset + N/2); - count += tree_count(N/4, leafN, offset + 3*N/4); - - return 1 + count; -} - -void elaborate_tree(size_t **p, int N, int leafN, int offset) { - - if(N <= leafN) return; - elaborate_tree(p, N/4, leafN, offset); - elaborate_tree(p, N/8, leafN, offset + N/4); - elaborate_tree(p, N/8, leafN, offset + N/4 + N/8); - elaborate_tree(p, N/4, leafN, offset + N/2); - elaborate_tree(p, N/4, leafN, offset + 3*N/4); - - (*p)[0] = N; - (*p)[1] = offset*2; +#ifdef __arm__ +typedef uint32_t insns_t; +#else +typedef uint8_t insns_t; +#endif - (*p)+=2; -} +#define P(x) (*(*p)++ = x) +static int ffts_tree_count(int N, int leaf_N, int offset) +{ + int count; + if (N <= leaf_N) { + return 0; + } + count = ffts_tree_count(N/4, leaf_N, offset); + count += ffts_tree_count(N/8, leaf_N, offset + N/4); + count += ffts_tree_count(N/8, leaf_N, offset + N/4 + N/8); + count += ffts_tree_count(N/4, leaf_N, offset + N/2); + count += ffts_tree_count(N/4, leaf_N, offset + 3*N/4); -uint32_t LUT_offset(size_t N, size_t leafN) { - int i; - size_t p_lut_size = 0; - size_t lut_size = 0; - int hardcoded = 0; - size_t n_luts = __builtin_ctzl(N/leafN); - int n = leafN*2; - //if(N <= 32) { n_luts = __builtin_ctzl(N/4); hardcoded = 1; } - - for(i=0;i= 9) { + P(0x66); + P(0x0F); + P(0x1F); + P(0x84); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + + if (count > 9) { + ffts_insert_nops(p, count - 9); + } + } else { + switch(count) { + case 0: + break; + case 2: + P(0x66); + /* fall through */ + case 1: + P(0x90); + break; + case 3: + P(0x0F); + P(0x1F); + P(0x00); + break; + case 4: + P(0x0F); + P(0x1F); + P(0x40); + P(0x00); + break; + case 5: + P(0x0F); + P(0x1F); + P(0x44); + P(0x00); + P(0x00); + break; + case 6: + P(0x66); + P(0x0F); + P(0x1F); + P(0x44); + P(0x00); + P(0x00); + break; + case 7: + P(0x0F); + P(0x1F); + P(0x80); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + break; + case 8: + default: + P(0x0F); + P(0x1F); + P(0x84); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + break; + } + } +} -void align_mem16(uint8_t **p, uint32_t offset) { +static void ffts_align_mem16(uint8_t **p, uint32_t offset) +{ #ifdef __x86_64__ - int r = (16 - (offset & 0xf)) - ((uint32_t)(*p) & 0xf); - r = (16 + r) & 0xf; - insert_nops(p, r); + int r = (16 - (offset & 0xf)) - ((uintptr_t)(*p) & 0xf); + r = (16 + r) & 0xf; + ffts_insert_nops(p, r); #endif } -void ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leafN, int sign) { - int count = tree_count(N, leafN, 0) + 1; - size_t *ps = malloc(count * 2 * sizeof(size_t)); - size_t *pps = ps; +transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t 
leaf_N, int sign) +{ + uint32_t offsets[8] = {0, N, N/2, 3*N/2, N/4, 5*N/4, 7*N/4, 3*N/4}; + uint32_t offsets_o[8] = {0, N, N/2, 3*N/2, 7*N/4, 3*N/4, N/4, 5*N/4}; + + int32_t pAddr = 0; + int32_t pN = 0; + int32_t pLUT = 0; + + insns_t *fp; + insns_t *start; + insns_t *x_4_addr; + insns_t *x_8_addr; + uint32_t loop_count; + + int count; + int i; + ptrdiff_t len; + + size_t *ps; + size_t *pps; + + count = ffts_tree_count(N, leaf_N, 0) + 1; + + ps = pps = malloc(2 * count * sizeof(*ps)); + if (!ps) { + return NULL; + } + + ffts_elaborate_tree(&pps, N, leaf_N, 0); + + pps[0] = 0; + pps[1] = 0; + + pps = ps; #ifdef __x86_64__ - if(sign < 0) p->constants = sse_constants; - else p->constants = sse_constants_inv; + if (sign < 0) { + p->constants = sse_constants; + } else { + p->constants = sse_constants_inv; + } #endif - elaborate_tree(&pps, N, leafN, 0); - pps[0] = 0; - pps[1] = 0; + fp = (insns_t*) p->transform_base; - pps = ps; +#ifdef __arm__ +#ifdef HAVE_NEON + memcpy(fp, neon_x8, neon_x8_t - neon_x8); + /* + * Changes adds to subtracts and vice versa to allow the computation + * of both the IFFT and FFT + */ + if(sign < 0) { + fp[31] ^= 0x00200000; + fp[32] ^= 0x00200000; + fp[33] ^= 0x00200000; + fp[34] ^= 0x00200000; + fp[65] ^= 0x00200000; + fp[66] ^= 0x00200000; + fp[70] ^= 0x00200000; + fp[74] ^= 0x00200000; + fp[97] ^= 0x00200000; + fp[98] ^= 0x00200000; + fp[102] ^= 0x00200000; + fp[104] ^= 0x00200000; + } + fp += (neon_x8_t - neon_x8) / 4; +#else + memcpy(fp, vfp_x8, vfp_end - vfp_x8); + if(sign > 0) { + fp[65] ^= 0x00000040; + fp[66] ^= 0x00000040; + fp[68] ^= 0x00000040; + fp[70] ^= 0x00000040; + fp[103] ^= 0x00000040; + fp[104] ^= 0x00000040; + fp[105] ^= 0x00000040; + fp[108] ^= 0x00000040; + fp[113] ^= 0x00000040; + fp[114] ^= 0x00000040; + fp[117] ^= 0x00000040; + fp[118] ^= 0x00000040; + } + fp += (vfp_end - vfp_x8) / 4; +#endif +#else + /* align call destination */ + ffts_align_mem16(&fp, 0); + x_8_addr = fp; -#ifdef __arm__ - if(N < 8192) p->transform_size = 8192; - else p->transform_size = N; + /* align loop/jump destination */ +#ifdef _M_AMD64 + ffts_align_mem16(&fp, 6); #else - if(N < 2048) p->transform_size = 16384; - else p->transform_size = 16384 + 2*N/8 * __builtin_ctzl(N); + ffts_align_mem16(&fp, 5); +#endif + + /* copy function */ + assert((char*) x8_soft_end > (char*) x8_soft); + len = (char*) x8_soft_end - (char*) x8_soft; + memcpy(fp, x8_soft, (size_t) len); + fp += len; #endif + //uint32_t *x_8_t_addr = fp; + //memcpy(fp, neon_x8_t, neon_end - neon_x8_t); + //fp += (neon_end - neon_x8_t) / 4; -#ifdef __APPLE__ - p->transform_base = mmap(NULL, p->transform_size, PROT_WRITE | PROT_READ, MAP_ANON | MAP_SHARED, -1, 0); +#ifdef __arm__ +#ifdef HAVE_NEON + memcpy(fp, neon_x4, neon_x8 - neon_x4); + if(sign < 0) { + fp[26] ^= 0x00200000; + fp[28] ^= 0x00200000; + fp[31] ^= 0x00200000; + fp[32] ^= 0x00200000; + } + fp += (neon_x8 - neon_x4) / 4; #else -#define MAP_ANONYMOUS 0x20 - p->transform_base = mmap(NULL, p->transform_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_SHARED, -1, 0); + memcpy(fp, vfp_x4, vfp_x8 - vfp_x4); + if(sign > 0) { + fp[36] ^= 0x00000040; + fp[38] ^= 0x00000040; + fp[43] ^= 0x00000040; + fp[44] ^= 0x00000040; + } + fp += (vfp_x8 - vfp_x4) / 4; +#endif +#else + /* align call destination */ + ffts_align_mem16(&fp, 0); + x_4_addr = fp; + + /* copy function */ + assert((char*) x8_soft > (char*) x4); + len = (char*) x8_soft - (char*) x4; + memcpy(fp, x4, (size_t) len); + fp += len; #endif -/* - if(p->transform_base == MAP_FAILED) { - 
fprintf(stderr, "MAP FAILED\n"); - exit(1); - }*/ - insns_t *func = p->transform_base;//valloc(8192); - insns_t *fp = func; - -//fprintf(stderr, "Allocating %d bytes \n", p->transform_size); -//fprintf(stderr, "Base address = %016p\n", func); - - if(!func) { - fprintf(stderr, "NOMEM\n"); - exit(1); - } - - insns_t *x_8_addr = fp; #ifdef __arm__ + start = fp; + + *fp = PUSH_LR(); + fp++; + *fp = 0xed2d8b10; + fp++; + + ADDI(&fp, 3, 1, 0); + ADDI(&fp, 7, 1, N); + ADDI(&fp, 5, 1, 2*N); + ADDI(&fp, 10, 7, 2*N); + ADDI(&fp, 4, 5, 2*N); + ADDI(&fp, 8, 10, 2*N); + ADDI(&fp, 6, 4, 2*N); + ADDI(&fp, 9, 8, 2*N); + + *fp = LDRI(12, 0, ((uint32_t)&p->offsets) - ((uint32_t)p)); + fp++; // load offsets into r12 + // *fp++ = LDRI(1, 0, 4); // load ws into r1 + ADDI(&fp, 1, 0, 0); + + ADDI(&fp, 0, 2, 0), // mov out into r0 + *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); + fp++; + #ifdef HAVE_NEON - memcpy(fp, neon_x8, neon_x8_t - neon_x8); - /* - * Changes adds to subtracts and vice versa to allow the computation - * of both the IFFT and FFT - */ - if(sign < 0) { - fp[31] ^= 0x00200000; fp[32] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000; - fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[70] ^= 0x00200000; fp[74] ^= 0x00200000; - fp[97] ^= 0x00200000; fp[98] ^= 0x00200000; fp[102] ^= 0x00200000; fp[104] ^= 0x00200000; - } - fp += (neon_x8_t - neon_x8) / 4; + MOVI(&fp, 11, p->i0); +#else + MOVI(&fp, 11, p->i0); +#endif +#else + /* align call destination */ + ffts_align_mem16(&fp, 0); + start = fp; + + /* save nonvolatile registers */ +#ifdef _M_AMD64 + /* use the shadow space to save first 3 registers */ + + /* mov [rsp + 8], rbx */ + *fp++ = 0x48; + *fp++ = 0x89; + *fp++ = 0x5C; + *fp++ = 0x24; + *fp++ = 0x08; + + /* mov [rsp + 16], rsi */ + *fp++ = 0x48; + *fp++ = 0x89; + *fp++ = 0x74; + *fp++ = 0x24; + *fp++ = 0x10; + + /* mov [rsp + 24], rdi */ + *fp++ = 0x48; + *fp++ = 0x89; + *fp++ = 0x7C; + *fp++ = 0x24; + *fp++ = 0x18; #else - memcpy(fp, vfp_x8, vfp_end - vfp_x8); - if(sign > 0) { - fp[65] ^= 0x00000040; - fp[66] ^= 0x00000040; - fp[68] ^= 0x00000040; - fp[70] ^= 0x00000040; - fp[103] ^= 0x00000040; - fp[104] ^= 0x00000040; - fp[105] ^= 0x00000040; - fp[108] ^= 0x00000040; - fp[113] ^= 0x00000040; - fp[114] ^= 0x00000040; - fp[117] ^= 0x00000040; - fp[118] ^= 0x00000040; - } - fp += (vfp_end - vfp_x8) / 4; + PUSH(&fp, RBP); + PUSH(&fp, RBX); + PUSH(&fp, R10); + PUSH(&fp, R11); + PUSH(&fp, R12); + PUSH(&fp, R13); + PUSH(&fp, R14); + PUSH(&fp, R15); #endif + + /* assign loop counter register */ + loop_count = p->i0 * 4; +#ifdef _M_AMD64 + MOVI(&fp, EBX, loop_count); #else - align_mem16(&fp, 0); - x_8_addr = fp; - align_mem16(&fp, 5); - memcpy(fp, x8_soft, x8_hard - x8_soft); - fp += (x8_hard - x8_soft); -//fprintf(stderr, "X8 start address = %016p\n", x_8_addr); + MOVI(&fp, ECX, loop_count); +#endif #endif -//uint32_t *x_8_t_addr = fp; -//memcpy(fp, neon_x8_t, neon_end - neon_x8_t); -//fp += (neon_end - neon_x8_t) / 4; - insns_t *x_4_addr = fp; + #ifdef __arm__ - #ifdef HAVE_NEON - memcpy(fp, neon_x4, neon_x8 - neon_x4); - if(sign < 0) { - fp[26] ^= 0x00200000; fp[28] ^= 0x00200000; fp[31] ^= 0x00200000; fp[32] ^= 0x00200000; - } - fp += (neon_x8 - neon_x4) / 4; - #else - memcpy(fp, vfp_x4, vfp_x8 - vfp_x4); - if(sign > 0) { - fp[36] ^= 0x00000040; - fp[38] ^= 0x00000040; - fp[43] ^= 0x00000040; - fp[44] ^= 0x00000040; - } - fp += (vfp_x8 - vfp_x4) / 4; - #endif +#ifdef HAVE_NEON + memcpy(fp, neon_ee, neon_oo - neon_ee); + if (sign < 0) { + fp[33] ^= 0x00200000; + fp[37] ^= 
0x00200000; + fp[38] ^= 0x00200000; + fp[39] ^= 0x00200000; + fp[40] ^= 0x00200000; + fp[41] ^= 0x00200000; + fp[44] ^= 0x00200000; + fp[45] ^= 0x00200000; + fp[46] ^= 0x00200000; + fp[47] ^= 0x00200000; + fp[48] ^= 0x00200000; + fp[57] ^= 0x00200000; + } + + fp += (neon_oo - neon_ee) / 4; +#else + memcpy(fp, vfp_e, vfp_o - vfp_e); + + if (sign > 0) { + fp[64] ^= 0x00000040; + fp[65] ^= 0x00000040; + fp[68] ^= 0x00000040; + fp[75] ^= 0x00000040; + fp[76] ^= 0x00000040; + fp[79] ^= 0x00000040; + fp[80] ^= 0x00000040; + fp[83] ^= 0x00000040; + fp[84] ^= 0x00000040; + fp[87] ^= 0x00000040; + fp[91] ^= 0x00000040; + fp[93] ^= 0x00000040; + } + fp += (vfp_o - vfp_e) / 4; +#endif #else - align_mem16(&fp, 0); - x_4_addr = fp; - memcpy(fp, x4, x8_soft - x4); - fp += (x8_soft - x4); + //fprintf(stderr, "Body start address = %016p\n", start); + /* copy function */ + assert((char*) leaf_ee > (char*) leaf_ee_init); + len = (char*) leaf_ee - (char*) leaf_ee_init; + memcpy(fp, leaf_ee_init, (size_t) len); + fp += len; + + /* align loop/jump destination */ +#ifdef _M_AMD64 + ffts_align_mem16(&fp, 8); +#else + ffts_align_mem16(&fp, 9); #endif - insns_t *start = fp; - -#ifdef __arm__ - *fp = PUSH_LR(); fp++; - *fp = 0xed2d8b10; fp++; - - ADDI(&fp, 3, 1, 0); - ADDI(&fp, 7, 1, N); - ADDI(&fp, 5, 1, 2*N); - ADDI(&fp, 10, 7, 2*N); - ADDI(&fp, 4, 5, 2*N); - ADDI(&fp, 8, 10, 2*N); - ADDI(&fp, 6, 4, 2*N); - ADDI(&fp, 9, 8, 2*N); - - *fp = LDRI(12, 0, ((uint32_t)&p->offsets) - ((uint32_t)p)); fp++; // load offsets into r12 -// *fp++ = LDRI(1, 0, 4); // load ws into r1 - ADDI(&fp, 1, 0, 0); - - ADDI(&fp, 0, 2, 0), // mov out into r0 + + /* copy function */ + assert((char*) leaf_oo > (char*) leaf_ee); + len = (char*) leaf_oo - (char*) leaf_ee; + memcpy(fp, leaf_ee, (size_t) len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_ee_offsets[i], 4 * offsets[i]); + } + + fp += len; + + if (ffts_ctzl(N) & 1) { + if (p->i1) { + loop_count += 4 * p->i1; + + /* align loop/jump destination */ +#ifdef _M_AMD64 + MOVI(&fp, EBX, loop_count); + ffts_align_mem16(&fp, 3); +#else + MOVI(&fp, ECX, loop_count); + ffts_align_mem16(&fp, 4); #endif + /* copy function */ + assert((char*) leaf_eo > (char*) leaf_oo); + len = (char*) leaf_eo - (char*) leaf_oo; + memcpy(fp, leaf_oo, len); -#ifdef __arm__ - *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++; - #ifdef HAVE_NEON - MOVI(&fp, 11, p->i0); - #else - MOVI(&fp, 11, p->i0); - #endif + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_oo_offsets[i], 4 * offsets_o[i]); + } + + fp += len; + } + + loop_count += 4; + + /* copy function */ + assert((char*) leaf_end > (char*) leaf_oe); + len = (char*) leaf_end - (char*) leaf_oe; + memcpy(fp, leaf_oe, len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_oe_offsets[i], 4 * offsets_o[i]); + } + fp += len; + } else { + loop_count += 4; + + /* copy function */ + assert((char*) leaf_oe > (char*) leaf_eo); + len = (char*) leaf_oe - (char*) leaf_eo; + memcpy(fp, leaf_eo, len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_eo_offsets[i], 4 * offsets[i]); + } + + fp += len; + + if (p->i1) { + loop_count += 4 * p->i1; + + /* align loop/jump destination */ +#ifdef _M_AMD64 + MOVI(&fp, EBX, loop_count); + ffts_align_mem16(&fp, 3); #else - align_mem16(&fp, 0); - start = fp; - - *fp++ = 0x4c; - *fp++ = 0x8b; - *fp++ = 0x07; - uint32_t lp_cnt = p->i0 * 4; - MOVI(&fp, RCX, lp_cnt); - - //LEA(&fp, R8, RDI, ((uint32_t)&p->offsets) - 
((uint32_t)p)); + MOVI(&fp, ECX, loop_count); + ffts_align_mem16(&fp, 4); #endif - //fp++; -#ifdef __arm__ -#ifdef HAVE_NEON - memcpy(fp, neon_ee, neon_oo - neon_ee); - if(sign < 0) { - fp[33] ^= 0x00200000; fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[39] ^= 0x00200000; - fp[40] ^= 0x00200000; fp[41] ^= 0x00200000; fp[44] ^= 0x00200000; fp[45] ^= 0x00200000; - fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000; - } - fp += (neon_oo - neon_ee) / 4; + + /* copy function */ + assert((char*) leaf_eo > (char*) leaf_oo); + len = (char*) leaf_eo - (char*) leaf_oo; + memcpy(fp, leaf_oo, len); + + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_oo_offsets[i], 4 * offsets_o[i]); + } + + fp += len; + } + } + + if (p->i1) { + uint32_t offsets_oe[8] = {7*N/4, 3*N/4, N/4, 5*N/4, 0, N, 3*N/2, N/2}; + + loop_count += 4 * p->i1; + + /* align loop/jump destination */ +#ifdef _M_AMD64 + MOVI(&fp, EBX, loop_count); + ffts_align_mem16(&fp, 8); #else - memcpy(fp, vfp_e, vfp_o - vfp_e); - if(sign > 0) { - fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[68] ^= 0x00000040; fp[75] ^= 0x00000040; - fp[76] ^= 0x00000040; fp[79] ^= 0x00000040; fp[80] ^= 0x00000040; fp[83] ^= 0x00000040; - fp[84] ^= 0x00000040; fp[87] ^= 0x00000040; fp[91] ^= 0x00000040; fp[93] ^= 0x00000040; - } - fp += (vfp_o - vfp_e) / 4; + MOVI(&fp, ECX, loop_count); + ffts_align_mem16(&fp, 9); #endif + + assert((char*) leaf_oo > (char*) leaf_ee); + len = (char*) leaf_oo - (char*) leaf_ee; + memcpy(fp, leaf_ee, len); + + for (i = 0; i < 8; i++) { + IMM32_NI(fp + sse_leaf_ee_offsets[i], 4 * offsets_oe[i]); + } + + fp += len; + } + + //fprintf(stderr, "Body start address = %016p\n", fp); + //LEA(&fp, R8, RDI, ((uint32_t)&p->ws) - ((uint32_t)p)); + memcpy(fp, x_init, (char*) x4 - (char*) x_init); + //IMM32_NI(fp + 3, ((int64_t)READ_IMM32(fp + 3)) + ((void *)x_init - (void *)fp )); + fp += ((char*) x4 - (char*) x_init); + + count = 2; + while (pps[0]) { + size_t ws_is; + + if (!pN) { +#ifdef _M_AMD64 + MOVI(&fp, EBX, pps[0]); #else -//fprintf(stderr, "Body start address = %016p\n", start); - - PUSH(&fp, RBP); - PUSH(&fp, RBX); - PUSH(&fp, R10); - PUSH(&fp, R11); - PUSH(&fp, R12); - PUSH(&fp, R13); - PUSH(&fp, R14); - PUSH(&fp, R15); - - int i; - memcpy(fp, leaf_ee_init, leaf_ee - leaf_ee_init); - -//fprintf(stderr, "Leaf ee init address = %016p\n", leaf_ee_init); -//fprintf(stderr, "Constants address = %016p\n", sse_constants); -//fprintf(stderr, "Constants address = %016p\n", p->constants); - -//int32_t val = READ_IMM32(fp + 3); -//fprintf(stderr, "diff = 0x%x\n", ((uint32_t)&p->constants) - ((uint32_t)p)); - -//int64_t v2 = val + (int64_t)((void *)leaf_ee_init - (void *)fp ); -//fprintf(stderr, "IMM = 0x%llx\n", v2); - -//IMM32_NI(fp + 3, ((int64_t) READ_IMM32(fp + 3)) + ((void *)leaf_ee_init - (void *)fp )); - fp += (leaf_ee - leaf_ee_init); - -//fprintf(stderr, "Leaf start address = %016p\n", fp); - align_mem16(&fp, 9); - memcpy(fp, leaf_ee, leaf_oo - leaf_ee); - - - uint32_t offsets[8] = {0, N, N/2, 3*N/2, N/4, 5*N/4, 7*N/4, 3*N/4}; - uint32_t offsets_o[8] = {0, N, N/2, 3*N/2, 7*N/4, 3*N/4, N/4, 5*N/4}; - uint32_t offsets_oe[8] = {7*N/4, 3*N/4, N/4, 5*N/4, 0, N, 3*N/2, N/2}; - - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_ee_offsets[i], offsets[i]*4); - - fp += (leaf_oo - leaf_ee); - - if(__builtin_ctzl(N) & 1){ - - if(p->i1) { - lp_cnt += p->i1 * 4; - MOVI(&fp, RCX, lp_cnt); - align_mem16(&fp, 4); - memcpy(fp, leaf_oo, leaf_eo - leaf_oo); - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oo_offsets[i], 
offsets_o[i]*4); - fp += (leaf_eo - leaf_oo); - } - - - memcpy(fp, leaf_oe, leaf_end - leaf_oe); - lp_cnt += 4; - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oe_offsets[i], offsets_o[i]*4); - fp += (leaf_end - leaf_oe); - - }else{ - - - memcpy(fp, leaf_eo, leaf_oe - leaf_eo); - lp_cnt += 4; - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_eo_offsets[i], offsets[i]*4); - fp += (leaf_oe - leaf_eo); - - if(p->i1) { - lp_cnt += p->i1 * 4; - MOVI(&fp, RCX, lp_cnt); - align_mem16(&fp, 4); - memcpy(fp, leaf_oo, leaf_eo - leaf_oo); - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_oo_offsets[i], offsets_o[i]*4); - fp += (leaf_eo - leaf_oo); - } - - } - if(p->i1) { - lp_cnt += p->i1 * 4; - MOVI(&fp, RCX, lp_cnt); - align_mem16(&fp, 9); - memcpy(fp, leaf_ee, leaf_oo - leaf_ee); - for(i=0;i<8;i++) IMM32_NI(fp + sse_leaf_ee_offsets[i], offsets_oe[i]*4); - fp += (leaf_oo - leaf_ee); - - } - -//fprintf(stderr, "Body start address = %016p\n", fp); - //LEA(&fp, R8, RDI, ((uint32_t)&p->ws) - ((uint32_t)p)); - memcpy(fp, x_init, x4 - x_init); -//IMM32_NI(fp + 3, ((int64_t)READ_IMM32(fp + 3)) + ((void *)x_init - (void *)fp )); - fp += (x4 - x_init); - - int32_t pAddr = 0; - int32_t pN = 0; - int32_t pLUT = 0; - count = 2; - while(pps[0]) { - - if(!pN) { - MOVI(&fp, RCX, pps[0] / 4); - }else{ - if((pps[1]*4)-pAddr) ADDI(&fp, RDX, (pps[1] * 4)- pAddr); - if(pps[0] > leafN && pps[0] - pN) { - - int diff = __builtin_ctzl(pps[0]) - __builtin_ctzl(pN); - *fp++ = 0xc1; - - if(diff > 0) { - *fp++ = 0xe1; - *fp++ = (diff & 0xff); - }else{ - *fp++ = 0xe9; - *fp++ = ((-diff) & 0xff); - } - } - } - - if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT) - ADDI(&fp, R8, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT); - - - if(pps[0] == 2*leafN) { - CALL(&fp, x_4_addr); - // }else if(!pps[2]){ - // //uint32_t *x_8_t_addr = fp; - // memcpy(fp, neon_x8_t, neon_ee - neon_x8_t); - // fp += (neon_ee - neon_x8_t) / 4; - // //*fp++ = BL(fp+2, x_8_t_addr); - }else{ - CALL(&fp, x_8_addr); - } - - pAddr = pps[1] * 4; - if(pps[0] > leafN) - pN = pps[0]; - pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN); -// fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); - count += 4; - pps += 2; - } + MOVI(&fp, ECX, pps[0] / 4); #endif + } else { + int offset = (4 * pps[1]) - pAddr; + if (offset) { +#ifdef _M_AMD64 + ADDI(&fp, R8, offset); +#else + ADDI(&fp, RDX, offset); +#endif + } + + if (pps[0] > leaf_N && pps[0] - pN) { + int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN); + +#ifdef _M_AMD64 + SHIFT(&fp, EBX, factor); +#else + SHIFT(&fp, ECX, factor); +#endif + } + } + + ws_is = 8 * p->ws_is[ffts_ctzl(pps[0] / leaf_N) - 1]; + if (ws_is != pLUT) { + int offset = (int) (ws_is - pLUT); + +#ifdef _M_AMD64 + ADDI(&fp, RDI, offset); +#else + ADDI(&fp, R8, offset); +#endif + } + + if (pps[0] == 2 * leaf_N) { + CALL(&fp, x_4_addr); + } else { + CALL(&fp, x_8_addr); + } + + pAddr = 4 * pps[1]; + if (pps[0] > leaf_N) { + pN = pps[0]; + } + + pLUT = ws_is;//LUT_offset(pps[0], leafN); + //fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); + count += 4; + pps += 2; + } +#endif + #ifdef __arm__ #ifdef HAVE_NEON - if(__builtin_ctzl(N) & 1){ - ADDI(&fp, 2, 7, 0); - ADDI(&fp, 7, 9, 0); - ADDI(&fp, 9, 2, 0); - - ADDI(&fp, 2, 8, 0); - ADDI(&fp, 8, 10, 0); - ADDI(&fp, 10, 2, 0); - - if(p->i1) { - MOVI(&fp, 11, p->i1); - memcpy(fp, neon_oo, neon_eo - neon_oo); - if(sign < 0) { - fp[12] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000; fp[15] ^= 0x00200000; - fp[27] ^= 0x00200000; fp[29] ^= 0x00200000; fp[30] ^= 0x00200000; fp[31] 
^= 0x00200000; - fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000; - } - fp += (neon_eo - neon_oo) / 4; - } - - *fp = LDRI(11, 1, ((uint32_t)&p->oe_ws) - ((uint32_t)p)); fp++; - - memcpy(fp, neon_oe, neon_end - neon_oe); - if(sign < 0) { - fp[19] ^= 0x00200000; fp[20] ^= 0x00200000; fp[22] ^= 0x00200000; fp[23] ^= 0x00200000; - fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[40] ^= 0x00200000; fp[41] ^= 0x00200000; - fp[64] ^= 0x00200000; fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[67] ^= 0x00200000; - } - fp += (neon_end - neon_oe) / 4; - - }else{ - - *fp = LDRI(11, 1, ((uint32_t)&p->eo_ws) - ((uint32_t)p)); fp++; - - memcpy(fp, neon_eo, neon_oe - neon_eo); - if(sign < 0) { - fp[10] ^= 0x00200000; fp[11] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000; - fp[31] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000; fp[35] ^= 0x00200000; - fp[59] ^= 0x00200000; fp[60] ^= 0x00200000; fp[61] ^= 0x00200000; fp[62] ^= 0x00200000; - } - fp += (neon_oe - neon_eo) / 4; - - ADDI(&fp, 2, 7, 0); - ADDI(&fp, 7, 9, 0); - ADDI(&fp, 9, 2, 0); - - ADDI(&fp, 2, 8, 0); - ADDI(&fp, 8, 10, 0); - ADDI(&fp, 10, 2, 0); - - if(p->i1) { - MOVI(&fp, 11, p->i1); - memcpy(fp, neon_oo, neon_eo - neon_oo); - if(sign < 0) { - fp[12] ^= 0x00200000; fp[13] ^= 0x00200000; fp[14] ^= 0x00200000; fp[15] ^= 0x00200000; - fp[27] ^= 0x00200000; fp[29] ^= 0x00200000; fp[30] ^= 0x00200000; fp[31] ^= 0x00200000; - fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000; - } - fp += (neon_eo - neon_oo) / 4; - } - - } - - - if(p->i1) { - ADDI(&fp, 2, 3, 0); - ADDI(&fp, 3, 7, 0); - ADDI(&fp, 7, 2, 0); - - ADDI(&fp, 2, 4, 0); - ADDI(&fp, 4, 8, 0); - ADDI(&fp, 8, 2, 0); - - ADDI(&fp, 2, 5, 0); - ADDI(&fp, 5, 9, 0); - ADDI(&fp, 9, 2, 0); - - ADDI(&fp, 2, 6, 0); - ADDI(&fp, 6, 10, 0); - ADDI(&fp, 10, 2, 0); - - ADDI(&fp, 2, 9, 0); - ADDI(&fp, 9, 10, 0); - ADDI(&fp, 10, 2, 0); - - *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++; - MOVI(&fp, 11, p->i1); - memcpy(fp, neon_ee, neon_oo - neon_ee); - if(sign < 0) { - fp[33] ^= 0x00200000; fp[37] ^= 0x00200000; fp[38] ^= 0x00200000; fp[39] ^= 0x00200000; - fp[40] ^= 0x00200000; fp[41] ^= 0x00200000; fp[44] ^= 0x00200000; fp[45] ^= 0x00200000; - fp[46] ^= 0x00200000; fp[47] ^= 0x00200000; fp[48] ^= 0x00200000; fp[57] ^= 0x00200000; - } - fp += (neon_oo - neon_ee) / 4; - - } + if(__builtin_ctzl(N) & 1) { + ADDI(&fp, 2, 7, 0); + ADDI(&fp, 7, 9, 0); + ADDI(&fp, 9, 2, 0); + + ADDI(&fp, 2, 8, 0); + ADDI(&fp, 8, 10, 0); + ADDI(&fp, 10, 2, 0); + + if(p->i1) { + MOVI(&fp, 11, p->i1); + memcpy(fp, neon_oo, neon_eo - neon_oo); + if(sign < 0) { + fp[12] ^= 0x00200000; + fp[13] ^= 0x00200000; + fp[14] ^= 0x00200000; + fp[15] ^= 0x00200000; + fp[27] ^= 0x00200000; + fp[29] ^= 0x00200000; + fp[30] ^= 0x00200000; + fp[31] ^= 0x00200000; + fp[46] ^= 0x00200000; + fp[47] ^= 0x00200000; + fp[48] ^= 0x00200000; + fp[57] ^= 0x00200000; + } + fp += (neon_eo - neon_oo) / 4; + } + + *fp = LDRI(11, 1, ((uint32_t)&p->oe_ws) - ((uint32_t)p)); + fp++; + + memcpy(fp, neon_oe, neon_end - neon_oe); + if(sign < 0) { + fp[19] ^= 0x00200000; + fp[20] ^= 0x00200000; + fp[22] ^= 0x00200000; + fp[23] ^= 0x00200000; + fp[37] ^= 0x00200000; + fp[38] ^= 0x00200000; + fp[40] ^= 0x00200000; + fp[41] ^= 0x00200000; + fp[64] ^= 0x00200000; + fp[65] ^= 0x00200000; + fp[66] ^= 0x00200000; + fp[67] ^= 0x00200000; + } + fp += (neon_end - neon_oe) / 4; + + } else { + + *fp = LDRI(11, 1, ((uint32_t)&p->eo_ws) - ((uint32_t)p)); + fp++; + + 
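+	/* r1 still holds the plan pointer at this point, so the LDRI above
+	 * fetches p->eo_ws (the eo-stage twiddle pointer) into r11; the
+	 * neon_eo template copied next consumes it, with the bit-21 XOR
+	 * below selecting between the two transform directions. */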
memcpy(fp, neon_eo, neon_oe - neon_eo); + if(sign < 0) { + fp[10] ^= 0x00200000; + fp[11] ^= 0x00200000; + fp[13] ^= 0x00200000; + fp[14] ^= 0x00200000; + fp[31] ^= 0x00200000; + fp[33] ^= 0x00200000; + fp[34] ^= 0x00200000; + fp[35] ^= 0x00200000; + fp[59] ^= 0x00200000; + fp[60] ^= 0x00200000; + fp[61] ^= 0x00200000; + fp[62] ^= 0x00200000; + } + fp += (neon_oe - neon_eo) / 4; + + ADDI(&fp, 2, 7, 0); + ADDI(&fp, 7, 9, 0); + ADDI(&fp, 9, 2, 0); + + ADDI(&fp, 2, 8, 0); + ADDI(&fp, 8, 10, 0); + ADDI(&fp, 10, 2, 0); + + if(p->i1) { + MOVI(&fp, 11, p->i1); + memcpy(fp, neon_oo, neon_eo - neon_oo); + if(sign < 0) { + fp[12] ^= 0x00200000; + fp[13] ^= 0x00200000; + fp[14] ^= 0x00200000; + fp[15] ^= 0x00200000; + fp[27] ^= 0x00200000; + fp[29] ^= 0x00200000; + fp[30] ^= 0x00200000; + fp[31] ^= 0x00200000; + fp[46] ^= 0x00200000; + fp[47] ^= 0x00200000; + fp[48] ^= 0x00200000; + fp[57] ^= 0x00200000; + } + fp += (neon_eo - neon_oo) / 4; + } + + } + + + if(p->i1) { + ADDI(&fp, 2, 3, 0); + ADDI(&fp, 3, 7, 0); + ADDI(&fp, 7, 2, 0); + + ADDI(&fp, 2, 4, 0); + ADDI(&fp, 4, 8, 0); + ADDI(&fp, 8, 2, 0); + + ADDI(&fp, 2, 5, 0); + ADDI(&fp, 5, 9, 0); + ADDI(&fp, 9, 2, 0); + + ADDI(&fp, 2, 6, 0); + ADDI(&fp, 6, 10, 0); + ADDI(&fp, 10, 2, 0); + + ADDI(&fp, 2, 9, 0); + ADDI(&fp, 9, 10, 0); + ADDI(&fp, 10, 2, 0); + + *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); + fp++; + MOVI(&fp, 11, p->i1); + memcpy(fp, neon_ee, neon_oo - neon_ee); + if(sign < 0) { + fp[33] ^= 0x00200000; + fp[37] ^= 0x00200000; + fp[38] ^= 0x00200000; + fp[39] ^= 0x00200000; + fp[40] ^= 0x00200000; + fp[41] ^= 0x00200000; + fp[44] ^= 0x00200000; + fp[45] ^= 0x00200000; + fp[46] ^= 0x00200000; + fp[47] ^= 0x00200000; + fp[48] ^= 0x00200000; + fp[57] ^= 0x00200000; + } + fp += (neon_oo - neon_ee) / 4; + + } #else - ADDI(&fp, 2, 7, 0); - ADDI(&fp, 7, 9, 0); - ADDI(&fp, 9, 2, 0); - - ADDI(&fp, 2, 8, 0); - ADDI(&fp, 8, 10, 0); - ADDI(&fp, 10, 2, 0); - - MOVI(&fp, 11, (p->i1>0) ? p->i1 : 1); - memcpy(fp, vfp_o, vfp_x4 - vfp_o); - if(sign > 0) { - fp[22] ^= 0x00000040; fp[24] ^= 0x00000040; fp[25] ^= 0x00000040; fp[26] ^= 0x00000040; - fp[62] ^= 0x00000040; fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[66] ^= 0x00000040; - } - fp += (vfp_x4 - vfp_o) / 4; - - ADDI(&fp, 2, 3, 0); - ADDI(&fp, 3, 7, 0); - ADDI(&fp, 7, 2, 0); - - ADDI(&fp, 2, 4, 0); - ADDI(&fp, 4, 8, 0); - ADDI(&fp, 8, 2, 0); - - ADDI(&fp, 2, 5, 0); - ADDI(&fp, 5, 9, 0); - ADDI(&fp, 9, 2, 0); - - ADDI(&fp, 2, 6, 0); - ADDI(&fp, 6, 10, 0); - ADDI(&fp, 10, 2, 0); - - ADDI(&fp, 2, 9, 0); - ADDI(&fp, 9, 10, 0); - ADDI(&fp, 10, 2, 0); - - *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); fp++; - MOVI(&fp, 11, (p->i2>0) ? p->i2 : 1); - memcpy(fp, vfp_e, vfp_o - vfp_e); - if(sign > 0) { - fp[64] ^= 0x00000040; fp[65] ^= 0x00000040; fp[68] ^= 0x00000040; fp[75] ^= 0x00000040; - fp[76] ^= 0x00000040; fp[79] ^= 0x00000040; fp[80] ^= 0x00000040; fp[83] ^= 0x00000040; - fp[84] ^= 0x00000040; fp[87] ^= 0x00000040; fp[91] ^= 0x00000040; fp[93] ^= 0x00000040; - } - fp += (vfp_o - vfp_e) / 4; + ADDI(&fp, 2, 7, 0); + ADDI(&fp, 7, 9, 0); + ADDI(&fp, 9, 2, 0); + + ADDI(&fp, 2, 8, 0); + ADDI(&fp, 8, 10, 0); + ADDI(&fp, 10, 2, 0); + + MOVI(&fp, 11, (p->i1>0) ? 
p->i1 : 1); + memcpy(fp, vfp_o, vfp_x4 - vfp_o); + if(sign > 0) { + fp[22] ^= 0x00000040; + fp[24] ^= 0x00000040; + fp[25] ^= 0x00000040; + fp[26] ^= 0x00000040; + fp[62] ^= 0x00000040; + fp[64] ^= 0x00000040; + fp[65] ^= 0x00000040; + fp[66] ^= 0x00000040; + } + fp += (vfp_x4 - vfp_o) / 4; + + ADDI(&fp, 2, 3, 0); + ADDI(&fp, 3, 7, 0); + ADDI(&fp, 7, 2, 0); + + ADDI(&fp, 2, 4, 0); + ADDI(&fp, 4, 8, 0); + ADDI(&fp, 8, 2, 0); + + ADDI(&fp, 2, 5, 0); + ADDI(&fp, 5, 9, 0); + ADDI(&fp, 9, 2, 0); + + ADDI(&fp, 2, 6, 0); + ADDI(&fp, 6, 10, 0); + ADDI(&fp, 10, 2, 0); + + ADDI(&fp, 2, 9, 0); + ADDI(&fp, 9, 10, 0); + ADDI(&fp, 10, 2, 0); + + *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); + fp++; + MOVI(&fp, 11, (p->i2>0) ? p->i2 : 1); + memcpy(fp, vfp_e, vfp_o - vfp_e); + if(sign > 0) { + fp[64] ^= 0x00000040; + fp[65] ^= 0x00000040; + fp[68] ^= 0x00000040; + fp[75] ^= 0x00000040; + fp[76] ^= 0x00000040; + fp[79] ^= 0x00000040; + fp[80] ^= 0x00000040; + fp[83] ^= 0x00000040; + fp[84] ^= 0x00000040; + fp[87] ^= 0x00000040; + fp[91] ^= 0x00000040; + fp[93] ^= 0x00000040; + } + fp += (vfp_o - vfp_e) / 4; #endif - *fp = LDRI(2, 1, ((uint32_t)&p->ws) - ((uint32_t)p)); fp++; // load offsets into r12 - //ADDI(&fp, 2, 1, 0); - MOVI(&fp, 1, 0); - - // args: r0 - out - // r1 - N - // r2 - ws -// ADDI(&fp, 3, 1, 0); // put N into r3 for counter - - int32_t pAddr = 0; - int32_t pN = 0; - int32_t pLUT = 0; - count = 2; - while(pps[0]) { - -// fprintf(stderr, "size %zu at %zu - diff %zu\n", pps[0], pps[1]*4, (pps[1]*4) - pAddr); - if(!pN) { - MOVI(&fp, 1, pps[0]); - }else{ - if((pps[1]*4)-pAddr) ADDI(&fp, 0, 0, (pps[1] * 4)- pAddr); - if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN); - } - - if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT) - ADDI(&fp, 2, 2, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT); - - - if(pps[0] == 2*leafN) { - *fp = BL(fp+2, x_4_addr); fp++; - }else if(!pps[2]){ - //uint32_t *x_8_t_addr = fp; + *fp = LDRI(2, 1, ((uint32_t)&p->ws) - ((uint32_t)p)); + fp++; // load offsets into r12 + //ADDI(&fp, 2, 1, 0); + MOVI(&fp, 1, 0); + + // args: r0 - out + // r1 - N + // r2 - ws + // ADDI(&fp, 3, 1, 0); // put N into r3 for counter + + int32_t pAddr = 0; + int32_t pN = 0; + int32_t pLUT = 0; + count = 2; + while(pps[0]) { + + // fprintf(stderr, "size %zu at %zu - diff %zu\n", pps[0], pps[1]*4, (pps[1]*4) - pAddr); + if(!pN) { + MOVI(&fp, 1, pps[0]); + } else { + if((pps[1]*4)-pAddr) ADDI(&fp, 0, 0, (pps[1] * 4)- pAddr); + if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN); + } + + if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT) + ADDI(&fp, 2, 2, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT); + + + if(pps[0] == 2*leafN) { + *fp = BL(fp+2, x_4_addr); + fp++; + } else if(!pps[2]) { + //uint32_t *x_8_t_addr = fp; #ifdef HAVE_NEON - memcpy(fp, neon_x8_t, neon_ee - neon_x8_t); - if(sign < 0) { - fp[31] ^= 0x00200000; fp[32] ^= 0x00200000; fp[33] ^= 0x00200000; fp[34] ^= 0x00200000; - fp[65] ^= 0x00200000; fp[66] ^= 0x00200000; fp[70] ^= 0x00200000; fp[74] ^= 0x00200000; - fp[97] ^= 0x00200000; fp[98] ^= 0x00200000; fp[102] ^= 0x00200000; fp[104] ^= 0x00200000; - } - fp += (neon_ee - neon_x8_t) / 4; - //*fp++ = BL(fp+2, x_8_t_addr); + memcpy(fp, neon_x8_t, neon_ee - neon_x8_t); + if(sign < 0) { + fp[31] ^= 0x00200000; + fp[32] ^= 0x00200000; + fp[33] ^= 0x00200000; + fp[34] ^= 0x00200000; + fp[65] ^= 0x00200000; + fp[66] ^= 0x00200000; + fp[70] ^= 0x00200000; + fp[74] ^= 0x00200000; + fp[97] ^= 0x00200000; + fp[98] ^= 0x00200000; + fp[102] ^= 0x00200000; + fp[104] ^= 0x00200000; + } + 
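+			/* a zero in pps[2] marks the last node of the schedule: the
+			 * transposed x8 body is inlined (and sign-patched) right here
+			 * in place of the BL used for the other nodes, so fp is simply
+			 * stepped past the copied words below. */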
fp += (neon_ee - neon_x8_t) / 4; + //*fp++ = BL(fp+2, x_8_t_addr); #else - *fp = BL(fp+2, x_8_addr); fp++; + *fp = BL(fp+2, x_8_addr); + fp++; #endif - }else{ - *fp = BL(fp+2, x_8_addr); fp++; - } - - pAddr = pps[1] * 4; - pN = pps[0]; - pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN); -// fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); - count += 4; - pps += 2; - } - - *fp++ = 0xecbd8b10; - *fp++ = POP_LR(); count++; + } else { + *fp = BL(fp+2, x_8_addr); + fp++; + } + + pAddr = pps[1] * 4; + pN = pps[0]; + pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN); + // fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); + count += 4; + pps += 2; + } + + *fp++ = 0xecbd8b10; + *fp++ = POP_LR(); + count++; #else - POP(&fp, R15); - POP(&fp, R14); - POP(&fp, R13); - POP(&fp, R12); - POP(&fp, R11); - POP(&fp, R10); - POP(&fp, RBX); - POP(&fp, RBP); - RET(&fp); - -//uint8_t *pp = func; -//int counter = 0; -//do{ -// printf("%02x ", *pp); -// if(counter++ % 16 == 15) printf("\n"); -//} while(++pp < fp); + /* restore nonvolatile registers */ +#ifdef _M_AMD64 + /* mov rbx, [rsp + 8] */ + *fp++ = 0x48; + *fp++ = 0x8B; + *fp++ = 0x5C; + *fp++ = 0x24; + *fp++ = 0x08; + + /* mov rsi, [rsp + 16] */ + *fp++ = 0x48; + *fp++ = 0x8B; + *fp++ = 0x74; + *fp++ = 0x24; + *fp++ = 0x10; + + /* mov rdi, [rsp + 24] */ + *fp++ = 0x48; + *fp++ = 0x8B; + *fp++ = 0x7C; + *fp++ = 0x24; + *fp++ = 0x18; +#else + POP(&fp, R15); + POP(&fp, R14); + POP(&fp, R13); + POP(&fp, R12); + POP(&fp, R11); + POP(&fp, R10); + POP(&fp, RBX); + POP(&fp, RBP); +#endif -//printf("\n"); + RET(&fp); + //uint8_t *pp = func; + //int counter = 0; + //do{ + // printf("%02x ", *pp); + // if(counter++ % 16 == 15) printf("\n"); + //} while(++pp < fp); + //printf("\n"); #endif + // *fp++ = B(14); count++; -// *fp++ = B(14); count++; - -//for(int i=0;i<(neon_x8 - neon_x4)/4;i++) -// fprintf(stderr, "%08x\n", x_4_addr[i]); -//fprintf(stderr, "\n"); -//for(int i=0;itransform_size, PROT_READ | PROT_EXEC)) { - perror("Couldn't mprotect"); - exit(1); - } -#ifdef __APPLE__ - sys_icache_invalidate(func, p->transform_size); -#elif __ANDROID__ - cacheflush((long)(func), (long)(func) + p->transform_size, 0); -#elif __linux__ -#ifdef __GNUC__ - __clear_cache((long)(func), (long)(func) + p->transform_size); -#endif -#endif + //for(int i=0;i<(neon_x8 - neon_x4)/4;i++) + // fprintf(stderr, "%08x\n", x_4_addr[i]); + //fprintf(stderr, "\n"); + //for(int i=0;itransform = (void *) (start); -} -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + free(ps); + + return (transform_func_t) start; +} \ No newline at end of file diff --git a/src/codegen.h b/src/codegen.h index c07144f..e3c2381 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
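One idiom dominates the ARM paths of ffts_generate_func_code above: a precompiled template routine is memcpy'd into the code buffer, after which selected instruction words are XORed to swap adds and subtracts (bit 0x00200000 of the NEON opcodes, bit 0x00000040 of the VFP ones) so that a single template serves both transform directions. A stand-alone sketch of that idiom in C, with a hypothetical index table in place of the hard-coded word offsets:

#include <stdint.h>
#include <string.h>

/* Copy a 32-bit instruction template into the JIT buffer at fp and,
 * when the requested direction differs from the one the template was
 * assembled for, toggle the add/subtract encoding bit at each listed
 * word index. Returns the advanced emission cursor, mirroring the
 * fp += (neon_x8_t - neon_x8) / 4 steps above. */
uint32_t *emit_patched(uint32_t *fp, const uint32_t *tmpl_start,
                       const uint32_t *tmpl_end,
                       const size_t *flip_at, size_t n_flips,
                       int flip_direction)
{
    size_t n_words = (size_t) (tmpl_end - tmpl_start);
    size_t i;

    memcpy(fp, tmpl_start, n_words * sizeof(*tmpl_start));

    if (flip_direction) {
        for (i = 0; i < n_flips; i++) {
            fp[flip_at[i]] ^= 0x00200000; /* NEON VADD <-> VSUB */
        }
    }

    return fp + n_words;
}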
Redistribution and use in source and binary forms, with or without @@ -31,20 +31,15 @@ */ -#ifndef __CODEGEN_H__ -#define __CODEGEN_H__ +#ifndef FFTS_CODEGEN_H +#define FFTS_CODEGEN_H -#include -#include -#include -#include -#include -#include -#include /* for PAGESIZE */ +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif #include "ffts.h" -void ffts_generate_func_code(ffts_plan_t *, size_t N, size_t leafN, int sign); +transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign); -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_CODEGEN_H */ diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 6a38671..a63d21d 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -32,8 +32,8 @@ */ -#ifndef __CODEGEN_SSE_H__ -#define __CODEGEN_SSE_H__ +#ifndef FFTS_CODEGEN_SSE_H +#define FFTS_CODEGEN_SSE_H void neon_x4(float *, size_t, float *); void neon_x8(float *, size_t, float *); @@ -47,7 +47,7 @@ void leaf_end(); void x_init(); void x4(); void x8_soft(); -void x8_hard(); +void x8_soft_end(); void sse_constants(); void sse_constants_inv(); @@ -74,123 +74,157 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define RSI 6 #define RDI 7 #define RBP 5 -#define R8 8 -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 - -void IMM8(uint8_t **p, int32_t imm) { - *(*p)++ = (imm & 0xff); +#define R8 8 +#define R9 9 +#define R10 10 +#define R11 11 +#define R12 12 +#define R13 13 +#define R14 14 +#define R15 15 + +void IMM8(uint8_t **p, int32_t imm) +{ + *(*p)++ = (imm & 0xff); } -void IMM16(uint8_t **p, int32_t imm) { - int i; - for(i=0;i<2;i++) { - *(*p)++ = (imm & (0xff << (i*8))) >> (i*8); - } +void IMM16(uint8_t **p, int32_t imm) +{ + int i; + + for (i = 0; i < 2; i++) { + *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); + } } -void IMM32(uint8_t **p, int32_t imm) { - int i; - for(i=0;i<4;i++) { - *(*p)++ = (imm & (0xff << (i*8))) >> (i*8); - } + +void IMM32(uint8_t **p, int32_t imm) +{ + int i; + + for (i = 0; i < 4; i++) { + *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); + } } -void IMM32_NI(uint8_t *p, int32_t imm) { - int i; - for(i=0;i<4;i++) { - *(p+i) = (imm & (0xff << (i*8))) >> (i*8); - } + +void IMM32_NI(uint8_t *p, int32_t imm) +{ + int i; + + for (i = 0; i < 4; i++) { + *(p+i) = (imm & (0xff << (8 * i))) >> (8 * i); + } } -int32_t READ_IMM32(uint8_t *p) { - int32_t rval = 0; - int i; - for(i=0;i<4;i++) { - rval |= *(p+i) << (i*8); - } - return rval; +int32_t READ_IMM32(uint8_t *p) +{ + int32_t rval = 0; + int i; + + for (i = 0; i < 4; i++) { + rval |= *(p+i) << (8 * i); + } + + return rval; } -void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) { -// if(imm < 65536) *(*p)++ = 0x66; - if(dst >= 8) *(*p)++ = 0x41; - - //if(imm < 65536 && imm >= 256) *(*p)++ = 0x66; - - //if(imm >= 256) - *(*p)++ = 0xb8 | (dst & 0x7); -// else *(*p)++ = 0xb0 | (dst & 0x7); - - // if(imm < 256) IMM8(p, imm); -// else -//if(imm < 65536) IMM16(p, imm); -//else - IMM32(p, imm); - -//if(dst < 8) { -// *(*p)++ = 0xb8 + dst; -//}else{ -// *(*p)++ = 0x49; -// *(*p)++ = 0xc7; -// *(*p)++ = 0xc0 | (dst - 8); -//} -//IMM32(p, imm); +void MOVI(uint8_t **p, uint8_t 
dst, uint32_t imm) +{ + if (dst >= 8) { + *(*p)++ = 0x41; + } + + *(*p)++ = 0xb8 | (dst & 0x7); + IMM32(p, imm); } -void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) { - if(disp == 0) { - *(*p)++ = (rm & 7) | ((reg & 7) << 3); - }else if(disp <= 127 || disp >= -128) { - *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3); - IMM8(p, disp); - }else{ - *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3); - IMM32(p, disp); - } +void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) +{ + if (disp == 0) { + *(*p)++ = (rm & 7) | ((reg & 7) << 3); + } else if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3); + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3); + IMM32(p, disp); + } } -void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) { +void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) +{ + *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); + *(*p)++ = 0x8d; + ADDRMODE(p, dst, base, disp); +} - *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); - *(*p)++ = 0x8d; - ADDRMODE(p, dst, base, disp); +void RET(uint8_t **p) +{ + *(*p)++ = 0xc3; } -void RET(uint8_t **p) { - *(*p)++ = 0xc3; +void ADDI(uint8_t **p, uint8_t dst, int32_t imm) +{ + if (dst >= 8) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } + + if (imm > 127 || imm <= -128) { + *(*p)++ = 0x81; + } else { + *(*p)++ = 0x83; + } + + *(*p)++ = 0xc0 | (dst & 0x7); + + if (imm > 127 || imm <= -128) { + IMM32(p, imm); + } else { + IMM8(p, imm); + } } -void ADDI(uint8_t **p, uint8_t dst, int32_t imm) { - - if(dst >= 8) *(*p)++ = 0x49; - else *(*p)++ = 0x48; - - if(imm > 127 || imm <= -128) *(*p)++ = 0x81; - else *(*p)++ = 0x83; - - *(*p)++ = 0xc0 | (dst & 0x7); - - if(imm > 127 || imm <= -128) IMM32(p, imm); - else IMM8(p, imm); +void CALL(uint8_t **p, uint8_t *func) +{ + *(*p)++ = 0xe8; + IMM32(p, func - *p - 4); } -void CALL(uint8_t **p, uint8_t *func) { - *(*p)++ = 0xe8; - IMM32(p, ((void *)func) - (void *)(*p) - 4); +void PUSH(uint8_t **p, uint8_t reg) +{ + if (reg >= 8) { + *(*p)++ = 0x41; + } + + *(*p)++ = 0x50 | (reg & 7); } -void PUSH(uint8_t **p, uint8_t reg) { - if(reg >= 8) *(*p)++ = 0x41; - *(*p)++ = 0x50 | (reg & 7); +void POP(uint8_t **p, uint8_t reg) +{ + if (reg >= 8) { + *(*p)++ = 0x41; + } + + *(*p)++ = 0x58 | (reg & 7); } -void POP(uint8_t **p, uint8_t reg) { - if(reg >= 8) *(*p)++ = 0x41; - *(*p)++ = 0x58 | (reg & 7); + +void SHIFT(uint8_t **p, uint8_t reg, int shift) +{ + if (reg >= 8) { + *(*p)++ = 0x49; + } + + *(*p)++ = 0xc1; + if (shift > 0) { + *(*p)++ = 0xe0 | (reg & 7); + *(*p)++ = (shift & 0xff); + } else { + *(*p)++ = 0xe8 | (reg & 7); + *(*p)++ = ((-shift) & 0xff); + } } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + +#endif /* FFTS_CODEGEN_SSE_H */ \ No newline at end of file diff --git a/src/ffts.c index b413c2b..9ceb97f 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -1,418 +1,618 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer.
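As a quick sanity check of the emitters above, this hypothetical test program (not part of the patch, and assuming the patched codegen_sse.h compiles stand-alone once <stdint.h> is in scope) encodes mov ecx, 100 / add rdx, -16 / ret into a buffer; the expected byte dump is b9 64 00 00 00 48 83 c2 f0 c3:

#include <stdint.h>
#include <stdio.h>

#include "codegen_sse.h" /* MOVI, ADDI, RET and the register indices */

int main(void)
{
    uint8_t buf[32];
    uint8_t *fp = buf;
    const uint8_t *b;

    MOVI(&fp, RCX, 100); /* b9 64 00 00 00   mov ecx, 100 */
    ADDI(&fp, RDX, -16); /* 48 83 c2 f0      add rdx, -16 */
    RET(&fp);            /* c3               ret */

    for (b = buf; b < fp; b++) {
        printf("%02x ", *b);
    }
    printf("\n");

    return 0;
}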
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ + #include "ffts.h" #include "macros.h" -//#include "mini_macros.h" #include "patterns.h" #include "ffts_small.h" #ifdef DYNAMIC_DISABLED - #include "ffts_static.h" +#include "ffts_static.h" #else - #include "codegen.h" +#include "codegen.h" #endif -#include - #include - #include - #include /* for PAGESIZE */ - #if __APPLE__ - #include +#include +#endif + +#if _WIN32 +#include #else +#include +#endif + +#ifdef __arm__ +static const FFTS_ALIGN(64) float w_data[16] = { + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + -0.70710678118654757273731092936941f, + -0.70710678118654746171500846685376f, + 1.0f, + 0.70710678118654757273731092936941f, + -0.0f, + -0.70710678118654746171500846685376f, + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + 1.0f, + 0.70710678118654757273731092936941f, + 0.0f, + 0.70710678118654746171500846685376f +}; #endif -void ffts_execute(ffts_plan_t *p, const void * in, void * out) { +static FFTS_INLINE int ffts_allow_execute(void *start, size_t len) +{ + int result; -//TODO: Define NEEDS_ALIGNED properly instead -#if defined(HAVE_SSE) || defined(HAVE_NEON) - if(((int)in % 16) != 0) { - LOG("ffts_execute: input buffer needs to be aligned to a 128bit boundary\n"); - } +#ifdef _WIN32 + DWORD old_protect; + result = !VirtualProtect(start, len, PAGE_EXECUTE_READ, &old_protect); +#else + result = mprotect(start, len, PROT_READ | PROT_EXEC); +#endif + + return result; +} + +static FFTS_INLINE int ffts_deny_execute(void *start, size_t len) +{ + int result; + +#ifdef _WIN32 + DWORD old_protect; + result = (int) VirtualProtect(start, len, PAGE_READWRITE, &old_protect); +#else + result = mprotect(start, len, PROT_READ | PROT_WRITE); +#endif + + return result; +} + +static FFTS_INLINE int ffts_flush_instruction_cache(void *start, size_t length) +{ +#ifdef __APPLE__ + sys_icache_invalidate(start, length); +#elif __ANDROID__ + cacheflush((long) start, (long) start + length, 0); +#elif __linux__ +#if GCC_VERSION_AT_LEAST(4,3) + __builtin___clear_cache(start, (char*) start + length); +#elif __GNUC__ + __clear_cache((long) start, (long) start + length); +#endif +#elif _WIN32 + return !FlushInstructionCache(GetCurrentProcess(), start, length); +#endif + return 0; +} - if(((int)out % 16) != 0) { - LOG("ffts_execute: output buffer needs to be aligned to a 128bit boundary\n"); - } +static FFTS_INLINE void *ffts_vmem_alloc(size_t length) +{ +#if __APPLE__ + return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0); +#elif _WIN32 + return VirtualAlloc(NULL, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS 0x20 #endif - p->transform(p, (const float *)in, (float *)out); + return mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0); +#endif } -void ffts_free(ffts_plan_t *p) { - p->destroy(p); +static FFTS_INLINE void ffts_vmem_free(void *addr, size_t length) +{ +#ifdef _WIN32 + (void) length; + VirtualFree(addr, 0, MEM_RELEASE); +#else + munmap(addr, length); +#endif } -void ffts_free_1d(ffts_plan_t *p) { - - size_t i; +void ffts_execute(ffts_plan_t *p, const void *in, void *out) +{ + /* TODO: Define NEEDS_ALIGNED properly instead */ +#if defined(HAVE_SSE) || defined(HAVE_NEON) + if (((int) in % 16) != 0) { + LOG("ffts_execute: input buffer needs to be aligned to a 128bit boundary\n"); + } + + if (((int) out % 16) != 0) { + LOG("ffts_execute: output buffer 
needs to be aligned to a 128bit boundary\n"); + } +#endif + + p->transform(p, (const float*) in, (float*) out); +} - if(p->ws) { - FFTS_FREE(p->ws); - } - if(p->is) free(p->is); - if(p->ws_is) free(p->ws_is); - if(p->offsets) free(p->offsets); - //free(p->transforms); - if(p->transforms) free(p->transforms); +void ffts_free(ffts_plan_t *p) +{ + if (p) { + p->destroy(p); + } +} +void ffts_free_1d(ffts_plan_t *p) +{ #if !defined(DYNAMIC_DISABLED) - if(p->transform_base) { - if (mprotect(p->transform_base, p->transform_size, PROT_READ | PROT_WRITE)) { - perror("Couldn't mprotect"); - exit(errno); - } - munmap(p->transform_base, p->transform_size); - //free(p->transform_base); - } + if (p->transform_base) { + ffts_deny_execute(p->transform_base, p->transform_size); + ffts_vmem_free(p->transform_base, p->transform_size); + } #endif - free(p); + + if (p->ws_is) { + free(p->ws_is); + } + + if (p->ws) { + FFTS_FREE(p->ws); + } + + if (p->transforms) { + free(p->transforms); + } + + if (p->is) { + free(p->is); + } + + if (p->offsets) { + free(p->offsets); + } + + free(p); } -ffts_plan_t *ffts_init_1d(size_t N, int sign) { - if(N == 0 || (N & (N - 1)) != 0){ - LOG("FFT size must be a power of two\n"); - return NULL; - } +static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) +{ + V MULI_SIGN; + int hardcoded; + size_t lut_size; + size_t n_luts; + float *tmp; + cdata_t *w; + size_t i; + int n; + +#ifdef __arm__ + /* #ifdef HAVE_NEON */ + V MULI_SIGN; + + if(sign < 0) { + MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); + } else { + MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); + } + + /* #endif */ +#else + if (sign < 0) { + MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); + } else { + MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); + } +#endif + + /* LUTS */ + n_luts = ffts_ctzl(N / leaf_N); + if (N < 32) { + n_luts = ffts_ctzl(N / 4); + hardcoded = 1; + } else { + hardcoded = 0; + } + + if (n_luts >= 32) { + n_luts = 0; + } - ffts_plan_t *p = malloc(sizeof(ffts_plan_t)); - size_t leafN = 8; - size_t i; + /* fprintf(stderr, "n_luts = %zu\n", n_luts); */ + n = leaf_N * 2; + if (hardcoded) { + n = 8; + } + + lut_size = 0; + + for (i = 0; i < n_luts; i++) { + if (!i || hardcoded) { +#ifdef __arm__ + if (N <= 32) { + lut_size += n/4 * 2 * sizeof(cdata_t); + } else { + lut_size += n/4 * sizeof(cdata_t); + } +#else + lut_size += n/4 * 2 * sizeof(cdata_t); +#endif + n *= 2; + } else { #ifdef __arm__ -//#ifdef HAVE_NEON - V MULI_SIGN; - - if(sign < 0) MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); - else MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); -//#endif + lut_size += n/8 * 3 * sizeof(cdata_t); #else - V MULI_SIGN; - - if(sign < 0) MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); - else MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); + lut_size += n/8 * 3 * 2 * sizeof(cdata_t); +#endif + } + n *= 2; + } + + /* lut_size *= 16; */ + + /* fprintf(stderr, "lut size = %zu\n", lut_size); */ + + if (n_luts) { + p->ws = FFTS_MALLOC(lut_size, 32); + if (!p->ws) { + goto cleanup; + } + + p->ws_is = malloc(n_luts * sizeof(*p->ws_is)); + if (!p->ws_is) { + goto cleanup; + } + } + + w = p->ws; + + n = leaf_N * 2; + if (hardcoded) { + n = 8; + } + +#ifdef HAVE_NEON + V neg = (sign < 0) ? 
VLIT4(0.0f, 0.0f, 0.0f, 0.0f) : VLIT4(-0.0f, -0.0f, -0.0f, -0.0f); #endif - p->transform = NULL; - p->transform_base = NULL; - p->transforms = NULL; - p->is = NULL; - p->ws_is = NULL; - p->ws = NULL; - p->offsets = NULL; - p->destroy = ffts_free_1d; - - if(N >= 32) { - ffts_init_offsets(p, N, leafN); + for (i = 0; i < n_luts; i++) { + p->ws_is[i] = w - (cdata_t *)p->ws; + //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]); + + if(!i || hardcoded) { + cdata_t *w0 = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); + float *fw0 = (float*) w0; + float *fw = (float *)w; + + size_t j; + for (j = 0; j < n/4; j++) { + w0[j][0] = W_re(n,j); + w0[j][1] = W_im(n,j); + } + #ifdef __arm__ + if (N < 32) { + // w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32); + float *fw = (float *)w; + V temp0, temp1, temp2; + for (j=0; j0, im); +#else + im = MULI(sign>0, im); +#endif + VST(fw + j*4 , re); + VST(fw + j*4+4, im); + // #endif + } + w += n/4 * 2; + } else { + //w = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); + float *fw = (float *)w; #ifdef HAVE_NEON - ffts_init_is(p, N, leafN, 1); + VS temp0, temp1, temp2; + for (j=0; ji0 = N/leafN/3+1; - p->i1 = N/leafN/3; - if((N/leafN) % 3 > 1) p->i1++; - p->i2 = N/leafN/3; - - #ifdef __arm__ - #ifdef HAVE_NEON - p->i0/=2; - p->i1/=2; - #endif - #else - p->i0/=2; - p->i1/=2; - #endif - - }else{ - p->transforms = malloc(2 * sizeof(transform_index_t)); - p->transforms[0] = 0; - p->transforms[1] = 1; - if(N == 2) p->transform = &firstpass_2; - else if(N == 4 && sign == -1) p->transform = &firstpass_4_f; - else if(N == 4 && sign == 1) p->transform = &firstpass_4_b; - else if(N == 8 && sign == -1) p->transform = &firstpass_8_f; - else if(N == 8 && sign == 1) p->transform = &firstpass_8_b; - else if(N == 16 && sign == -1) p->transform = &firstpass_16_f; - else if(N == 16 && sign == 1) p->transform = &firstpass_16_b; - - p->is = NULL; - p->offsets = NULL; - } - - int hardcoded = 0; - - /* LUTS */ - size_t n_luts = __builtin_ctzl(N/leafN); - if(N < 32) { n_luts = __builtin_ctzl(N/4); hardcoded = 1; } - - if(n_luts >= 32) n_luts = 0; - -// fprintf(stderr, "n_luts = %zu\n", n_luts); - - cdata_t *w; - - int n = leafN*2; - if(hardcoded) n = 8; - - size_t lut_size = 0; - - for(i=0;iws = FFTS_MALLOC(lut_size,32); - p->ws_is = malloc(n_luts * sizeof(size_t)); - }else{ - p->ws = NULL; - p->ws_is = NULL; - } - w = p->ws; - - n = leafN*2; - if(hardcoded) n = 8; - - #ifdef HAVE_NEON - V neg = (sign < 0) ? 
VLIT4(0.0f, 0.0f, 0.0f, 0.0f) : VLIT4(-0.0f, -0.0f, -0.0f, -0.0f); - #endif - - for(i=0;iws_is[i] = w - (cdata_t *)p->ws; - //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]); - - if(!i || hardcoded) { - cdata_t *w0 = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); - - size_t j; - for(j=0;j0, im); - #else - im = MULI(sign>0, im); - #endif - VST(fw + j*4 , re); - VST(fw + j*4+4, im); - // #endif - } - w += n/4 * 2; - }else{ - //w = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); - float *fw = (float *)w; - #ifdef HAVE_NEON - VS temp0, temp1, temp2; - for(j=0;jws[i] = w; - - n *= 2; - } - - float *tmp = (float *)p->ws; - - if(sign < 0) { - p->oe_ws = (void *)(&w_data[4]); - p->ee_ws = (void *)(w_data); - p->eo_ws = (void *)(&w_data[4]); - }else{ - p->oe_ws = (void *)(w_data + 12); - p->ee_ws = (void *)(w_data + 8); - p->eo_ws = (void *)(w_data + 12); - } - - p->N = N; - p->lastlut = w; - p->n_luts = n_luts; -#ifdef DYNAMIC_DISABLED - if(sign < 0) { - if(N >= 32) p->transform = ffts_static_transform_f; - }else{ - if(N >= 32) p->transform = ffts_static_transform_i; - } + FFTS_FREE(w0); + } else { + cdata_t *w0 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + cdata_t *w1 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + cdata_t *w2 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + + float *fw0 = (float*) w0; + float *fw1 = (float*) w1; + float *fw2 = (float*) w2; + + float *fw = (float *)w; + V temp0, temp1, temp2, re, im; + + size_t j; + for (j = 0; j < n/8; j++) { + w0[j][0] = W_re((float) n, (float) 2*j); + w0[j][1] = W_im((float) n, (float) 2*j); + w1[j][0] = W_re((float) n, (float) j); + w1[j][1] = W_im((float) n, (float) j); + w2[j][0] = W_re((float) n, (float) (j + (n/8))); + w2[j][1] = W_im((float) n, (float) (j + (n/8))); + } + +#ifdef __arm__ + //w = FFTS_MALLOC(n/8 * 3 * sizeof(cdata_t), 32); + float *fw = (float *)w; +#ifdef HAVE_NEON + VS temp0, temp1, temp2; + for (j = 0; j < n/8; j += 4) { + temp0 = VLD2(fw0 + j*2); + temp0.val[1] = VXOR(temp0.val[1], neg); + STORESPR(fw + j*2*3, temp0); + temp1 = VLD2(fw1 + j*2); + temp1.val[1] = VXOR(temp1.val[1], neg); + STORESPR(fw + j*2*3 + 8, temp1); + temp2 = VLD2(fw2 + j*2); + temp2.val[1] = VXOR(temp2.val[1], neg); + STORESPR(fw + j*2*3 + 16, temp2); + } #else - if(N>=32) ffts_generate_func_code(p, N, leafN, sign); + for (j = 0; j < n/8; j += 1) { + fw[j*6] = fw0[j*2]; + fw[j*6+1] = (sign < 0) ? fw0[j*2+1] : -fw0[j*2+1]; + fw[j*6+2] = fw1[j*2+0]; + fw[j*6+3] = (sign < 0) ? fw1[j*2+1] : -fw1[j*2+1]; + fw[j*6+4] = fw2[j*2+0]; + fw[j*6+5] = (sign < 0) ? 
fw2[j*2+1] : -fw2[j*2+1]; + } #endif + w += n/8 * 3; +#else + //w = FFTS_MALLOC(n/8 * 3 * 2 * sizeof(cdata_t), 32); + for (j = 0; j < n/8; j += 2) { + temp0 = VLD(fw0 + j*2); + re = VDUPRE(temp0); + im = VDUPIM(temp0); + im = VXOR(im, MULI_SIGN); + VST(fw + j*2*6 , re); + VST(fw + j*2*6+4, im); + + temp1 = VLD(fw1 + j*2); + re = VDUPRE(temp1); + im = VDUPIM(temp1); + im = VXOR(im, MULI_SIGN); + VST(fw + j*2*6+8 , re); + VST(fw + j*2*6+12, im); + + temp2 = VLD(fw2 + j*2); + re = VDUPRE(temp2); + im = VDUPIM(temp2); + im = VXOR(im, MULI_SIGN); + VST(fw + j*2*6+16, re); + VST(fw + j*2*6+20, im); + } + + w += n/8 * 3 * 2; +#endif + + FFTS_FREE(w0); + FFTS_FREE(w1); + FFTS_FREE(w2); + } + ///p->ws[i] = w; + + n *= 2; + } + + tmp = (float *)p->ws; + +#ifdef __arm__ + if (sign < 0) { + p->oe_ws = (void*)(&w_data[4]); + p->ee_ws = (void*)(w_data); + p->eo_ws = (void*)(&w_data[4]); + } else { + p->oe_ws = (void*)(w_data + 12); + p->ee_ws = (void*)(w_data + 8); + p->eo_ws = (void*)(w_data + 12); + } +#endif + + p->lastlut = w; + p->n_luts = n_luts; + return 0; - return p; +cleanup: + return -1; } -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +ffts_plan_t *ffts_init_1d(size_t N, int sign) +{ + const size_t leaf_N = 8; + ffts_plan_t *p; + + if (N < 2 || (N & (N - 1)) != 0) { + LOG("FFT size must be a power of two\n"); + return NULL; + } + + p = calloc(1, sizeof(*p)); + if (!p) { + return NULL; + } + + p->destroy = ffts_free_1d; + p->N = N; + + if (ffts_generate_luts(p, N, leaf_N, sign)) { + goto cleanup; + } + + if (N >= 32) { + p->offsets = ffts_init_offsets(N, leaf_N); + if (!p->offsets) { + goto cleanup; + } + + p->is = ffts_init_is(N, leaf_N, 1); + if (!p->is) { + goto cleanup; + } + + p->i0 = N/leaf_N/3 + 1; + p->i1 = p->i2 = N/leaf_N/3; + if ((N/leaf_N) % 3 > 1) { + p->i1++; + } + +#ifdef __arm__ +#ifdef HAVE_NEON + p->i0 /= 2; + p->i1 /= 2; +#endif +#else + p->i0 /= 2; + p->i1 /= 2; +#endif + +#ifdef DYNAMIC_DISABLED + if (sign < 0) { + p->transform = ffts_static_transform_f; + } else { + p->transform = ffts_static_transform_i; + } +#else + /* determinate transform size */ +#ifdef __arm__ + if (N < 8192) { + p->transform_size = 8192; + } else { + p->transform_size = N; + } +#else + if (N < 2048) { + p->transform_size = 16384; + } else { + p->transform_size = 16384 + 2*N/8 * ffts_ctzl(N); + } +#endif + + /* allocate code/function buffer */ + p->transform_base = ffts_vmem_alloc(p->transform_size); + if (!p->transform_base) { + goto cleanup; + } + + /* generate code */ + p->transform = ffts_generate_func_code(p, N, leaf_N, sign); + if (!p->transform) { + goto cleanup; + } + + /* enable execution with read access for the block */ + if (ffts_allow_execute(p->transform_base, p->transform_size)) { + goto cleanup; + } + + /* flush from the instruction cache */ + if (ffts_flush_instruction_cache(p->transform_base, p->transform_size)) { + goto cleanup; + } +#endif + } else { + p->transforms = malloc(2 * sizeof(*p->transforms)); + if (!p->transforms) { + goto cleanup; + } + + p->transforms[0] = 0; + p->transforms[1] = 1; + + switch (N) { + case 2: + p->transform = &ffts_firstpass_2; + break; + case 4: + if (sign == -1) { + p->transform = &ffts_firstpass_4_f; + } else if (sign == 1) { + p->transform = &ffts_firstpass_4_b; + } + break; + case 8: + if (sign == -1) { + p->transform = &ffts_firstpass_8_f; + } else if (sign == 1) { + p->transform = &ffts_firstpass_8_b; + } + break; + case 16: + default: + if (sign == -1) { + p->transform = &ffts_firstpass_16_f; + } else { + p->transform = 
&ffts_firstpass_16_b; + } + break; + } + } + + return p; + +cleanup: + ffts_free_1d(p); + return NULL; +} \ No newline at end of file diff --git a/src/ffts.h b/src/ffts.h index cd9e24b..ca2951a 100644 --- a/src/ffts.h +++ b/src/ffts.h @@ -30,21 +30,21 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + #ifndef __CP_SSE_H__ #define __CP_SSE_H__ -#include "config.h" +//#include "config.h" +//#include "codegen.h" +#include "ffts_attributes.h" +#include "types.h" -#include -#include +#include #include #include #include -//#include - -//#include "codegen.h" -#include "types.h" - +#include +#include #ifdef __ANDROID__ #include @@ -53,29 +53,14 @@ #define LOG(s) fprintf(stderr, s) #endif -#define PI 3.1415926535897932384626433832795028841971693993751058209 - -static const __attribute__ ((aligned(64))) float w_data[16] = { - 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, - -0.70710678118654757273731092936941, -0.70710678118654746171500846685376, - 1.0f, 0.70710678118654757273731092936941f, - -0.0f, -0.70710678118654746171500846685376, - 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, - 0.70710678118654757273731092936941, 0.70710678118654746171500846685376, - 1.0f, 0.70710678118654757273731092936941f, - 0.0f, 0.70710678118654746171500846685376 -}; - -__INLINE float W_re(float N, float k) { return cos(-2.0f * PI * k / N); } -__INLINE float W_im(float N, float k) { return sin(-2.0f * PI * k / N); } - -typedef size_t transform_index_t; - -//typedef void (*transform_func_t)(float *data, size_t N, float *LUT); -typedef void (*transform_func_t)(float *data, size_t N, float *LUT); +#ifndef M_PI +#define M_PI 3.1415926535897932384626433832795028841971693993751058209 +#endif typedef struct _ffts_plan_t ffts_plan_t; +typedef void (*transform_func_t)(ffts_plan_t *p, const void *in, void *out); + /** * Contains all the Information need to perform FFT * @@ -86,101 +71,153 @@ typedef struct _ffts_plan_t ffts_plan_t; */ struct _ffts_plan_t { - /** - * - */ - ptrdiff_t *offsets; + /** + * + */ + ptrdiff_t *offsets; #ifdef DYNAMIC_DISABLED - /** - * Twiddle factors - */ - void *ws; - /** - * ee - 2 size x size8 - * oo - 2 x size4 in parallel - * oe - - */ - void *oe_ws, *eo_ws, *ee_ws; + /** + * Twiddle factors + */ + void *ws; + + /** + * ee - 2 size x size8 + * oo - 2 x size4 in parallel + * oe - + */ + void *oe_ws, *eo_ws, *ee_ws; #else - void __attribute__((aligned(32))) *ws; - void __attribute__((aligned(32))) *oe_ws, *eo_ws, *ee_ws; + void FFTS_ALIGN(32) *ws; + void FFTS_ALIGN(32) *oe_ws, *eo_ws, *ee_ws; #endif - /** - * Pointer into an array of precomputed indexes for the input data array - */ - ptrdiff_t *is; - - /** - * Twiddle Factor Indexes - */ - size_t *ws_is; - - /** - * Size of the loops for the base cases - */ - size_t i0, i1, n_luts; - - /** - * Size fo the Transform - */ - size_t N; - void *lastlut; - /** - * Used in multidimensional Code ?? 
- */
-	transform_index_t *transforms;
-	//transform_func_t transform;
-
-	/**
-	 * Pointer to the dynamically generated function
-	 * that will execute the FFT
-	 */
-	void (*transform)(ffts_plan_t * , const void * , void * );
-
-	/**
-	 * Pointer to the base memory address of
-	 * of the transform function
-	 */
-	void *transform_base;
-
-	/**
-	 * Size of the memory block contain the
-	 * generated code
-	 */
-	size_t transform_size;
-
-	/**
-	 * Points to the cosnant variables used by
-	 * the Assembly Code
-	 */
-	void *constants;
-
-	// multi-dimensional stuff:
-	struct _ffts_plan_t **plans;
-	int rank;
-	size_t *Ns, *Ms;
-	void *buf;
-
-	void *transpose_buf;
-
-	/**
-	 * Pointer to the destroy function
-	 * to clean up the plan after use
-	 * (differs for real and multi dimension transforms
-	 */
-	void (*destroy)(ffts_plan_t *);
-
-	/**
-	 * Coefficiants for the real valued transforms
-	 */
-	float *A, *B;
-
-	size_t i2;
+
+	/**
+	 * Pointer into an array of precomputed indexes for the input data array
+	 */
+	ptrdiff_t *is;
+
+	/**
+	 * Twiddle Factor Indexes
+	 */
+	size_t *ws_is;
+
+	/**
+	 * Size of the loops for the base cases
+	 */
+	size_t i0, i1, n_luts;
+
+	/**
+	 * Size of the Transform
+	 */
+	size_t N;
+	void *lastlut;
+
+	/**
+	 * Used in multidimensional code
+	 */
+	size_t *transforms;
+
+	/**
+	 * Pointer to the dynamically generated function
+	 * that will execute the FFT
+	 */
+	transform_func_t transform;
+
+	/**
+	 * Pointer to the base memory address
+	 * of the transform function
+	 */
+	void *transform_base;
+
+	/**
+	 * Size of the memory block containing the
+	 * generated code
+	 */
+	size_t transform_size;
+
+	/**
+	 * Points to the constant variables used by
+	 * the assembly code
+	 */
+	void *constants;
+
+	// multi-dimensional stuff:
+	struct _ffts_plan_t **plans;
+	int rank;
+	size_t *Ns, *Ms;
+	void *buf;
+
+	void *transpose_buf;
+
+	/**
+	 * Pointer to the destroy function
+	 * to clean up the plan after use
+	 * (differs for real and multidimensional transforms)
+	 */
+	void (*destroy)(ffts_plan_t *);
+
+	/**
+	 * Coefficients for the real valued transforms
+	 */
+	float *A, *B;
+
+	size_t i2;
 };
 
+static FFTS_INLINE void *ffts_aligned_malloc(size_t size)
+{
+#if defined(_MSC_VER)
+	return _aligned_malloc(size, 32);
+#else
+	return valloc(size);
+#endif
+}
 
-void ffts_free(ffts_plan_t *);
+static FFTS_INLINE void ffts_aligned_free(void *p)
+{
+#if defined(_MSC_VER)
+	_aligned_free(p);
+#else
+	free(p);
+#endif
+}
+
+#if GCC_VERSION_AT_LEAST(3,3)
+#define ffts_ctzl __builtin_ctzl
+#elif defined(_MSC_VER)
+#include <intrin.h>
+#ifdef _M_AMD64
+#pragma intrinsic(_BitScanForward64)
+static __inline unsigned long ffts_ctzl(size_t N)
+{
+	unsigned long count;
+	_BitScanForward64((unsigned long*) &count, N);
+	return count;
+}
+#else
+#pragma intrinsic(_BitScanForward)
+static __inline unsigned long ffts_ctzl(size_t N)
+{
+	unsigned long count;
+	_BitScanForward((unsigned long*) &count, N);
+	return count;
+}
+#endif /* _M_AMD64 */
+#endif /* _MSC_VER */
+
+static FFTS_ALWAYS_INLINE float W_re(float N, float k)
+{
+	return cos(-2.0 * M_PI * k / N);
+}
+
+static FFTS_ALWAYS_INLINE float W_im(float N, float k)
+{
+	return sin(-2.0 * M_PI * k / N);
+}
+
+void ffts_free(ffts_plan_t *);
+void ffts_execute(ffts_plan_t *, const void *, void *);
 ffts_plan_t *ffts_init_1d(size_t N, int sign);
-void ffts_execute(ffts_plan_t *, const void *, void *);
+
 #endif
-// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:

diff --git a/src/macros-sse.h b/src/macros-sse.h
index d845734..85cd02d 100644
--- a/src/macros-sse.h
+++ 
b/src/macros-sse.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,8 +31,8 @@ */ -#ifndef __SSE_FLOAT_H__ -#define __SSE_FLOAT_H__ +#ifndef FFTS_MACROS_SSE_H +#define FFTS_MACROS_SSE_H #include @@ -63,23 +63,27 @@ typedef __m128 V; #define FFTS_MALLOC(d,a) (_mm_malloc(d,a)) #define FFTS_FREE(d) (_mm_free(d)) -__INLINE V IMULI(int inv, V a) { - if(inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); - else return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); +static FFTS_ALWAYS_INLINE V IMULI(int inv, V a) +{ + if (inv) { + return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); + } else { + return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); + } } - -__INLINE V IMUL(V d, V re, V im) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VSUB(re, im); +static FFTS_ALWAYS_INLINE V IMUL(V d, V re, V im) +{ + re = VMUL(re, d); + im = VMUL(im, VSWAPPAIRS(d)); + return VSUB(re, im); } -__INLINE V IMULJ(V d, V re, V im) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VADD(re, im); +static FFTS_ALWAYS_INLINE V IMULJ(V d, V re, V im) +{ + re = VMUL(re, d); + im = VMUL(im, VSWAPPAIRS(d)); + return VADD(re, im); } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_MACROS_SSE_H */ diff --git a/src/macros.h b/src/macros.h index 08029a3..12c52c6 100644 --- a/src/macros.h +++ b/src/macros.h @@ -1,38 +1,42 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2013, Michael J. Cree - Copyright (c) 2012, 2013, Anthony M. Blake - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2013, Michael J. Cree +Copyright (c) 2012, 2013, Anthony M. Blake + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef __MACROS_H__ -#define __MACROS_H__ +#ifndef FFTS_MACROS_H +#define FFTS_MACROS_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif #ifdef HAVE_NEON #include "macros-neon.h" @@ -44,119 +48,138 @@ #include "macros-altivec.h" #endif #endif - #endif - #ifdef HAVE_VFP #include "macros-alpha.h" #endif + #ifdef HAVE_SSE - #include "macros-sse.h" +#include "macros-sse.h" #endif -static inline void TX2(V *a, V *b) +static FFTS_INLINE void TX2(V *a, V *b) { V TX2_t0 = VUNPACKLO(*a, *b); V TX2_t1 = VUNPACKHI(*a, *b); - *a = TX2_t0; *b = TX2_t1; + *a = TX2_t0; + *b = TX2_t1; } -static inline void K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) { V uk, uk2, zk_p, zk_n, zk, zk_d; - uk = *r0; uk2 = *r1; + + uk = *r0; + uk2 = *r1; + zk_p = IMUL(*r2, re, im); zk_n = IMULJ(*r3, re, im); - zk = VADD(zk_p, zk_n); + zk = VADD(zk_p, zk_n); zk_d = IMULI(inv, VSUB(zk_p, zk_n)); - + *r2 = VSUB(uk, zk); *r0 = VADD(uk, zk); *r3 = VADD(uk2, zk_d); *r1 = VSUB(uk2, zk_d); } - -static inline void S_4(V r0, V r1, V r2, V r3, - data_t * restrict o0, data_t * restrict o1, - data_t * restrict o2, data_t * restrict o3) -{ - VST(o0, r0); VST(o1, r1); VST(o2, r2); VST(o3, r3); -} - - -static inline void L_2_4(int inv, - const data_t * restrict i0, const data_t * restrict i1, - const data_t * restrict i2, const data_t * restrict i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void L_2_4(int inv, const data_t* FFTS_RESTRICT i0, const data_t* FFTS_RESTRICT i1, + const data_t* FFTS_RESTRICT i2, const data_t* FFTS_RESTRICT i3, + V *r0, V *r1, V *r2, V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3); + t0 = VLD(i0); + t1 = VLD(i1); + t2 = VLD(i2); + t3 = VLD(i3); + t4 = VADD(t0, t1); t5 = VSUB(t0, t1); t6 = VADD(t2, t3); t7 = VSUB(t2, t3); + *r0 = VUNPACKLO(t4, t5); *r1 = VUNPACKLO(t6, t7); + t5 = IMULI(inv, t5); + t0 = VADD(t6, t4); t2 = VSUB(t6, t4); t1 = VSUB(t7, t5); t3 = VADD(t7, t5); + *r3 = VUNPACKHI(t0, t1); *r2 = VUNPACKHI(t2, t3); } - -static inline void 
L_4_4(int inv, - const data_t * restrict i0, const data_t * restrict i1, - const data_t * restrict i2, const data_t * restrict i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void L_4_4(int inv, const data_t* FFTS_RESTRICT i0, const data_t* FFTS_RESTRICT i1, + const data_t* FFTS_RESTRICT i2, const data_t* FFTS_RESTRICT i3, + V *r0, V *r1, V *r2, V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; - - t0 = VLD(i0); t1 = VLD(i1); t2 = VLD(i2); t3 = VLD(i3); + + t0 = VLD(i0); + t1 = VLD(i1); + t2 = VLD(i2); + t3 = VLD(i3); + t4 = VADD(t0, t1); t5 = VSUB(t0, t1); t6 = VADD(t2, t3); + t7 = IMULI(inv, VSUB(t2, t3)); + t0 = VADD(t4, t6); t2 = VSUB(t4, t6); t1 = VSUB(t5, t7); t3 = VADD(t5, t7); + TX2(&t0, &t1); TX2(&t2, &t3); - *r0 = t0; *r2 = t1; *r1 = t2; *r3 = t3; -} - + *r0 = t0; + *r2 = t1; + *r1 = t2; + *r3 = t3; +} -static inline void L_4_2(int inv, - const data_t * restrict i0, const data_t * restrict i1, - const data_t * restrict i2, const data_t * restrict i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void L_4_2(int inv, const data_t * FFTS_RESTRICT i0, const data_t * FFTS_RESTRICT i1, + const data_t * FFTS_RESTRICT i2, const data_t * FFTS_RESTRICT i3, + V *r0, V *r1, V *r2, V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = VLD(i0); t1 = VLD(i1); t6 = VLD(i2); t7 = VLD(i3); + t0 = VLD(i0); + t1 = VLD(i1); + t6 = VLD(i2); + t7 = VLD(i3); + t2 = VBLEND(t6, t7); t3 = VBLEND(t7, t6); + t4 = VADD(t0, t1); t5 = VSUB(t0, t1); t6 = VADD(t2, t3); t7 = VSUB(t2, t3); + *r2 = VUNPACKHI(t4, t5); - *r3 = VUNPACKHI(t6, t7); + *r3 = VUNPACKHI(t6, t7); + t7 = IMULI(inv, t7); + t0 = VADD(t4, t6); t2 = VSUB(t4, t6); t1 = VSUB(t5, t7); t3 = VADD(t5, t7); + *r0 = VUNPACKLO(t0, t1); *r1 = VUNPACKLO(t2, t3); } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + +#define S_4(r0, r1, r2, r3, o0, o1, o2, o3) \ + VST(o0, r0); VST(o1, r1); VST(o2, r2); VST(o3, r3); + +#endif /* FFTS_MACROS_H */ \ No newline at end of file diff --git a/src/sse.s b/src/sse.s index 90f02db..ccdebc8 100644 --- a/src/sse.s +++ b/src/sse.s @@ -54,6 +54,7 @@ _leaf_ee_init: leaf_ee_init: #endif #lea L_sse_constants(%rip), %r9 + movq (%rdi), %r8 movq 0xe0(%rdi), %r9 xorl %eax, %eax @@ -559,9 +560,9 @@ x8_soft: leaq (%r14,%rcx,4), %r15 X8_soft_loop: movaps (%rsi), %xmm9 - movaps (%r10,%rax,4), %xmm6 + movaps (%r10,%rax,4), %xmm6 movaps %xmm9, %xmm11 - movaps (%r11,%rax,4), %xmm7 + movaps (%r11,%rax,4), %xmm7 movaps 16(%rsi), %xmm8 mulps %xmm6, %xmm11 mulps %xmm7, %xmm9 @@ -647,6 +648,14 @@ X8_soft_loop: ret #ifdef __APPLE__ + .globl _x8_soft_end +_x8_soft_end: +#else + .globl x8_soft_end +x8_soft_end: +#endif + +#ifdef __APPLE__ .globl _x8_hard _x8_hard: #else diff --git a/src/sse_win64.s b/src/sse_win64.s new file mode 100644 index 0000000..6b75391 --- /dev/null +++ b/src/sse_win64.s @@ -0,0 +1,840 @@ +/* + + This file is part of FFTS -- The Fastest Fourier Transform in the South + + Copyright (c) 2012, Anthony M. Blake + Copyright (c) 2012, The University of Waikato + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + + .code64 + + .globl _neon_x4 + .align 4 +_neon_x4: + + .globl _neon_x8 + .align 4 +_neon_x8: + + .globl _neon_x8_t + .align 4 +_neon_x8_t: + +#ifdef __APPLE__ + .globl _leaf_ee_init +_leaf_ee_init: +#else + .globl leaf_ee_init +leaf_ee_init: +#endif + +# rcx is a pointer to the ffts_plan +# eax is loop counter (init to 0) +# rbx is loop max count +# rdx is 'in' base pointer +# r8 is 'out' base pointer +# rdi is offsets pointer +# rsi is constants pointer +# scratch: rax r10 r11 + + xorl %eax, %eax + movq (%rcx), %rdi + movq 0xe0(%rcx), %rsi + +# _leaf_ee + 8 needs 16 byte alignment +#ifdef __APPLE__ + .globl _leaf_ee +_leaf_ee: +#else + .globl leaf_ee +leaf_ee: +#endif + movaps 32(%rsi), %xmm0 #83.5 + movaps (%rsi), %xmm8 #83.5 +LEAF_EE_1: +LEAF_EE_const_0: + movaps 0xFECA(%rdx,%rax,4), %xmm7 #83.5 +LEAF_EE_const_2: + movaps 0xFECA(%rdx,%rax,4), %xmm12 #83.5 + movaps %xmm7, %xmm6 #83.5 +LEAF_EE_const_3: + movaps 0xFECA(%rdx,%rax,4), %xmm10 #83.5 + movaps %xmm12, %xmm11 #83.5 + subps %xmm10, %xmm12 #83.5 + addps %xmm10, %xmm11 #83.5 + xorps %xmm8, %xmm12 #83.5 +LEAF_EE_const_1: + movaps 0xFECA(%rdx,%rax,4), %xmm9 #83.5 +LEAF_EE_const_4: + movaps 0xFECA(%rdx,%rax,4), %xmm10 #83.5 + addps %xmm9, %xmm6 #83.5 + subps %xmm9, %xmm7 #83.5 +LEAF_EE_const_5: + movaps 0xFECA(%rdx,%rax,4), %xmm13 #83.5 + movaps %xmm10, %xmm9 #83.5 +LEAF_EE_const_6: + movaps 0xFECA(%rdx,%rax,4), %xmm3 #83.5 + movaps %xmm6, %xmm5 #83.5 +LEAF_EE_const_7: + movaps 0xFECA(%rdx,%rax,4), %xmm14 #83.5 + movaps %xmm3, %xmm15 #83.5 + shufps $177, %xmm12, %xmm12 #83.5 + movaps %xmm7, %xmm4 #83.5 + movslq (%rdi, %rax, 4), %r10 #83.44 + subps %xmm13, %xmm10 #83.5 + subps %xmm14, %xmm3 #83.5 + addps %xmm11, %xmm5 #83.5 + subps %xmm11, %xmm6 #83.5 + subps %xmm12, %xmm4 #83.5 + addps %xmm12, %xmm7 #83.5 + addps %xmm13, %xmm9 #83.5 + addps %xmm14, %xmm15 #83.5 + movaps 16(%rsi), %xmm12 #83.5 + movaps %xmm9, %xmm1 #83.5 + movaps 16(%rsi), %xmm11 #83.5 + movaps %xmm5, %xmm2 #83.5 + mulps %xmm10, %xmm12 #83.5 + subps %xmm15, %xmm9 #83.5 + addps %xmm15, %xmm1 #83.5 + mulps %xmm3, %xmm11 #83.5 + addps %xmm1, %xmm2 #83.5 + subps %xmm1, %xmm5 #83.5 + shufps $177, %xmm10, %xmm10 #83.5 + xorps %xmm8, %xmm9 #83.5 + shufps $177, %xmm3, %xmm3 #83.5 + movaps %xmm6, %xmm1 #83.5 + mulps %xmm0, %xmm10 #83.5 + movaps %xmm4, %xmm13 #83.5 + mulps %xmm0, %xmm3 #83.5 + subps %xmm10, %xmm12 #83.5 + addps %xmm3, %xmm11 #83.5 + movaps %xmm12, %xmm3 #83.5 + movaps %xmm7, %xmm14 #83.5 + shufps $177, %xmm9, %xmm9 #83.5 + subps %xmm11, %xmm12 #83.5 + addps %xmm11, %xmm3 #83.5 + subps %xmm9, %xmm1 #83.5 + addps %xmm9, 
%xmm6 #83.5 + addps %xmm3, %xmm4 #83.5 + subps %xmm3, %xmm13 #83.5 + xorps %xmm8, %xmm12 #83.5 + movaps %xmm2, %xmm3 #83.5 + shufps $177, %xmm12, %xmm12 #83.5 + movaps %xmm6, %xmm9 #83.5 + movslq 8(%rdi, %rax, 4), %r11 #83.59 + movlhps %xmm4, %xmm3 #83.5 + addq $4, %rax + shufps $238, %xmm4, %xmm2 #83.5 + movaps %xmm1, %xmm4 #83.5 + subps %xmm12, %xmm7 #83.5 + addps %xmm12, %xmm14 #83.5 + movlhps %xmm7, %xmm4 #83.5 + shufps $238, %xmm7, %xmm1 #83.5 + movaps %xmm5, %xmm7 #83.5 + movlhps %xmm13, %xmm7 #83.5 + movlhps %xmm14, %xmm9 #83.5 + shufps $238, %xmm13, %xmm5 #83.5 + shufps $238, %xmm14, %xmm6 #83.5 + movaps %xmm3, (%r8,%r10,4) #83.5 + movaps %xmm4, 16(%r8,%r10,4) #83.5 + movaps %xmm7, 32(%r8,%r10,4) #83.5 + movaps %xmm9, 48(%r8,%r10,4) #83.5 + movaps %xmm2, (%r8,%r11,4) #83.5 + movaps %xmm1, 16(%r8,%r11,4) #83.5 + movaps %xmm5, 32(%r8,%r11,4) #83.5 + movaps %xmm6, 48(%r8,%r11,4) #83.5 + cmpq %rbx, %rax + jne LEAF_EE_1 + +# _leaf_oo + 3 needs to be 16 byte aligned +#ifdef __APPLE__ + .globl _leaf_oo +_leaf_oo: +#else + .globl leaf_oo +leaf_oo: +#endif + movaps (%rsi), %xmm5 #92.7 +LEAF_OO_1: +LEAF_OO_const_0: + movaps 0xFECA(%rdx,%rax,4), %xmm4 #93.5 + movaps %xmm4, %xmm6 #93.5 +LEAF_OO_const_1: + movaps 0xFECA(%rdx,%rax,4), %xmm7 #93.5 +LEAF_OO_const_2: + movaps 0xFECA(%rdx,%rax,4), %xmm10 #93.5 + addps %xmm7, %xmm6 #93.5 + subps %xmm7, %xmm4 #93.5 +LEAF_OO_const_3: + movaps 0xFECA(%rdx,%rax,4), %xmm8 #93.5 + movaps %xmm10, %xmm9 #93.5 +LEAF_OO_const_4: + movaps 0xFECA(%rdx,%rax,4), %xmm1 #93.5 + movaps %xmm6, %xmm3 #93.5 +LEAF_OO_const_5: + movaps 0xFECA(%rdx,%rax,4), %xmm11 #93.5 + movaps %xmm1, %xmm2 #93.5 +LEAF_OO_const_6: + movaps 0xFECA(%rdx,%rax,4), %xmm14 #93.5 + movaps %xmm4, %xmm15 #93.5 +LEAF_OO_const_7: + movaps 0xFECA(%rdx,%rax,4), %xmm12 #93.5 + movaps %xmm14, %xmm13 #93.5 + movslq (%rdi, %rax, 4), %r10 #83.44 + subps %xmm8, %xmm10 #93.5 + addps %xmm8, %xmm9 #93.5 + addps %xmm11, %xmm2 #93.5 + subps %xmm12, %xmm14 #93.5 + subps %xmm11, %xmm1 #93.5 + addps %xmm12, %xmm13 #93.5 + addps %xmm9, %xmm3 #93.5 + subps %xmm9, %xmm6 #93.5 + xorps %xmm5, %xmm10 #93.5 + xorps %xmm5, %xmm14 #93.5 + shufps $177, %xmm10, %xmm10 #93.5 + movaps %xmm2, %xmm9 #93.5 + shufps $177, %xmm14, %xmm14 #93.5 + movaps %xmm6, %xmm7 #93.5 + movslq 8(%rdi, %rax, 4), %r11 #83.59 + addq $4, %rax #92.18 + addps %xmm10, %xmm4 #93.5 + addps %xmm13, %xmm9 #93.5 + subps %xmm13, %xmm2 #93.5 + subps %xmm10, %xmm15 #93.5 + movaps %xmm1, %xmm13 #93.5 + movaps %xmm2, %xmm8 #93.5 + movlhps %xmm4, %xmm7 #93.5 + subps %xmm14, %xmm13 #93.5 + addps %xmm14, %xmm1 #93.5 + shufps $238, %xmm4, %xmm6 #93.5 + movaps %xmm3, %xmm14 #93.5 + movaps %xmm9, %xmm4 #93.5 + movlhps %xmm15, %xmm14 #93.5 + movlhps %xmm13, %xmm4 #93.5 + movlhps %xmm1, %xmm8 #93.5 + shufps $238, %xmm15, %xmm3 #93.5 + shufps $238, %xmm13, %xmm9 #93.5 + shufps $238, %xmm1, %xmm2 #93.5 + movaps %xmm14, (%r8,%r10,4) #93.5 + movaps %xmm7, 16(%r8,%r10,4) #93.5 + movaps %xmm4, 32(%r8,%r10,4) #93.5 + movaps %xmm8, 48(%r8,%r10,4) #93.5 + movaps %xmm3, (%r8,%r11,4) #93.5 + movaps %xmm6, 16(%r8,%r11,4) #93.5 + movaps %xmm9, 32(%r8,%r11,4) #93.5 + movaps %xmm2, 48(%r8,%r11,4) #93.5 + cmpq %rbx, %rax + jne LEAF_OO_1 # Prob 95% #92.14 + +#ifdef __APPLE__ + .globl _leaf_eo +_leaf_eo: +#else + .globl leaf_eo +leaf_eo: +#endif +LEAF_EO_const_0: + movaps 0xFECA(%rdx,%rax,4), %xmm9 #88.5 +LEAF_EO_const_2: + movaps 0xFECA(%rdx,%rax,4), %xmm7 #88.5 + movaps %xmm9, %xmm11 #88.5 +LEAF_EO_const_3: + movaps 0xFECA(%rdx,%rax,4), %xmm5 #88.5 + movaps %xmm7, %xmm6 #88.5 
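+# The 0xFECA displacements in these movaps instructions are placeholders:
+# each LEAF_*_const_N label marks an instruction whose disp32 immediate is
+# rewritten at plan-creation time by the code generator, which locates the
+# immediates via the byte offsets recorded in the sse_leaf_*_offsets tables
+# at the end of this file.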
+LEAF_EO_const_1: + movaps 0xFECA(%rdx,%rax,4), %xmm4 #88.5 + subps %xmm5, %xmm7 #88.5 + addps %xmm4, %xmm11 #88.5 + subps %xmm4, %xmm9 #88.5 + addps %xmm5, %xmm6 #88.5 + movaps (%rsi), %xmm3 #88.5 + movaps %xmm11, %xmm10 #88.5 + xorps %xmm3, %xmm7 #88.5 + movaps %xmm9, %xmm8 #88.5 + shufps $177, %xmm7, %xmm7 #88.5 + addps %xmm6, %xmm10 #88.5 + subps %xmm6, %xmm11 #88.5 + subps %xmm7, %xmm8 #88.5 + addps %xmm7, %xmm9 #88.5 + movslq 8(%rdi, %rax, 4), %r11 #83.59 + movaps %xmm10, %xmm2 #88.5 + movslq (%rdi, %rax, 4), %r10 #83.44 + movaps %xmm11, %xmm1 #88.5 + shufps $238, %xmm8, %xmm10 #88.5 + shufps $238, %xmm9, %xmm11 #88.5 + movaps %xmm10, (%r8,%r11,4) #88.5 + movaps %xmm11, 16(%r8,%r11,4) #88.5 +LEAF_EO_const_4: + movaps 0xFECA(%rdx,%rax,4), %xmm15 #88.5 +LEAF_EO_const_5: + movaps 0xFECA(%rdx,%rax,4), %xmm12 #88.5 + movaps %xmm15, %xmm14 #88.5 +LEAF_EO_const_6: + movaps 0xFECA(%rdx,%rax,4), %xmm4 #88.5 + addps %xmm12, %xmm14 #88.5 + subps %xmm12, %xmm15 #88.5 +LEAF_EO_const_7: + movaps 0xFECA(%rdx,%rax,4), %xmm13 #88.5 + movaps %xmm4, %xmm5 #88.5 + movaps %xmm14, %xmm7 #88.5 + addps %xmm13, %xmm5 #88.5 + subps %xmm13, %xmm4 #88.5 + movlhps %xmm8, %xmm2 #88.5 + movaps %xmm5, %xmm8 #88.5 + movlhps %xmm15, %xmm7 #88.5 + xorps %xmm3, %xmm15 #88.5 + movaps %xmm5, %xmm6 #88.5 + subps %xmm14, %xmm5 #88.5 + addps %xmm14, %xmm6 #88.5 + movlhps %xmm9, %xmm1 #88.5 + movaps %xmm4, %xmm14 #88.5 + movlhps %xmm4, %xmm8 #88.5 + movaps %xmm1, %xmm12 #88.5 + shufps $177, %xmm15, %xmm15 #88.5 + movaps 0x30(%rsi), %xmm11 #88.5 + addq $4, %rax #90.5 + subps %xmm15, %xmm14 #88.5 + mulps %xmm7, %xmm11 #88.5 + addps %xmm15, %xmm4 #88.5 + movaps 0x30(%rsi), %xmm9 #88.5 + movaps 0x40(%rsi), %xmm15 #88.5 + shufps $177, %xmm7, %xmm7 #88.5 + mulps %xmm8, %xmm9 #88.5 + mulps %xmm15, %xmm7 #88.5 + shufps $177, %xmm8, %xmm8 #88.5 + subps %xmm7, %xmm11 #88.5 + mulps %xmm15, %xmm8 #88.5 + movaps %xmm11, %xmm10 #88.5 + addps %xmm8, %xmm9 #88.5 + shufps $238, %xmm14, %xmm6 #88.5 + subps %xmm9, %xmm11 #88.5 + addps %xmm9, %xmm10 #88.5 + xorps %xmm3, %xmm11 #88.5 + movaps %xmm2, %xmm3 #88.5 + shufps $177, %xmm11, %xmm11 #88.5 + subps %xmm10, %xmm3 #88.5 + addps %xmm10, %xmm2 #88.5 + addps %xmm11, %xmm12 #88.5 + subps %xmm11, %xmm1 #88.5 + shufps $238, %xmm4, %xmm5 #88.5 + movaps %xmm5, 48(%r8,%r11,4) #88.5 + movaps %xmm6, 32(%r8,%r11,4) #88.5 + movaps %xmm2, (%r8,%r10,4) #88.5 + movaps %xmm1, 16(%r8,%r10,4) #88.5 + movaps %xmm3, 32(%r8,%r10,4) #88.5 + movaps %xmm12, 48(%r8,%r10,4) #88.5 + +#ifdef __APPLE__ + .globl _leaf_oe +_leaf_oe: +#else + .globl leaf_oe +leaf_oe: +#endif + movaps (%rsi), %xmm0 #59.5 +LEAF_OE_const_2: + movaps 0xFECA(%rdx,%rax,4), %xmm6 #70.5 +LEAF_OE_const_3: + movaps 0xFECA(%rdx,%rax,4), %xmm8 #70.5 + movaps %xmm6, %xmm10 #70.5 + shufps $228, %xmm8, %xmm10 #70.5 + movaps %xmm10, %xmm9 #70.5 + shufps $228, %xmm6, %xmm8 #70.5 +LEAF_OE_const_0: + movaps 0xFECA(%rdx,%rax,4), %xmm12 #70.5 +LEAF_OE_const_1: + movaps 0xFECA(%rdx,%rax,4), %xmm7 #70.5 + movaps %xmm12, %xmm14 #70.5 + movslq (%rdi, %rax, 4), %r10 #83.44 + addps %xmm8, %xmm9 #70.5 + subps %xmm8, %xmm10 #70.5 + addps %xmm7, %xmm14 #70.5 + subps %xmm7, %xmm12 #70.5 + movaps %xmm9, %xmm4 #70.5 + movaps %xmm14, %xmm13 #70.5 + shufps $238, %xmm10, %xmm4 #70.5 + xorps %xmm0, %xmm10 #70.5 + shufps $177, %xmm10, %xmm10 #70.5 + movaps %xmm12, %xmm11 #70.5 + movaps %xmm14, %xmm5 #70.5 + addps %xmm9, %xmm13 #70.5 + subps %xmm10, %xmm11 #70.5 + subps %xmm9, %xmm14 #70.5 + shufps $238, %xmm12, %xmm5 #70.5 + addps %xmm10, %xmm12 #70.5 + movslq 8(%rdi, %rax, 4), 
%r11 #83.59 + movlhps %xmm11, %xmm13 #70.5 + movaps %xmm13, (%r8,%r10,4) #70.5 + movaps 0x30(%rsi), %xmm13 #70.5 + movlhps %xmm12, %xmm14 #70.5 + movaps 0x40(%rsi), %xmm12 #70.5 + mulps %xmm5, %xmm13 #70.5 + shufps $177, %xmm5, %xmm5 #70.5 + mulps %xmm12, %xmm5 #70.5 + movaps %xmm14, 16(%r8,%r10,4) #70.5 + subps %xmm5, %xmm13 #70.5 + movaps 0x30(%rsi), %xmm5 #70.5 + mulps %xmm4, %xmm5 #70.5 + shufps $177, %xmm4, %xmm4 #70.5 + mulps %xmm12, %xmm4 #70.5 +LEAF_OE_const_4: + movaps 0xFECA(%rdx,%rax,4), %xmm9 #70.5 + addps %xmm4, %xmm5 #70.5 +LEAF_OE_const_6: + movaps 0xFECA(%rdx,%rax,4), %xmm7 #70.5 + movaps %xmm9, %xmm3 #70.5 +LEAF_OE_const_7: + movaps 0xFECA(%rdx,%rax,4), %xmm2 #70.5 + movaps %xmm7, %xmm6 #70.5 +LEAF_OE_const_5: + movaps 0xFECA(%rdx,%rax,4), %xmm15 #70.5 + movaps %xmm13, %xmm4 #70.5 + subps %xmm2, %xmm7 #70.5 + addps %xmm15, %xmm3 #70.5 + subps %xmm15, %xmm9 #70.5 + addps %xmm2, %xmm6 #70.5 + subps %xmm5, %xmm13 #70.5 + addps %xmm5, %xmm4 #70.5 + xorps %xmm0, %xmm7 #70.5 + addq $4, %rax #72.5 + movaps %xmm3, %xmm2 #70.5 + shufps $177, %xmm7, %xmm7 #70.5 + movaps %xmm9, %xmm8 #70.5 + xorps %xmm0, %xmm13 #70.5 + addps %xmm6, %xmm2 #70.5 + subps %xmm7, %xmm8 #70.5 + subps %xmm6, %xmm3 #70.5 + addps %xmm7, %xmm9 #70.5 + movaps %xmm2, %xmm10 #70.5 + movaps %xmm3, %xmm11 #70.5 + shufps $238, %xmm8, %xmm2 #70.5 + shufps $238, %xmm9, %xmm3 #70.5 + movaps %xmm2, %xmm14 #70.5 + shufps $177, %xmm13, %xmm13 #70.5 + subps %xmm4, %xmm14 #70.5 + addps %xmm4, %xmm2 #70.5 + movaps %xmm3, %xmm4 #70.5 + subps %xmm13, %xmm3 #70.5 + addps %xmm13, %xmm4 #70.5 + movlhps %xmm8, %xmm10 #70.5 + movlhps %xmm9, %xmm11 #70.5 + movaps %xmm10, 32(%r8,%r10,4) #70.5 + movaps %xmm11, 48(%r8,%r10,4) #70.5 + movaps %xmm2, (%r8,%r11,4) #70.5 + movaps %xmm3, 16(%r8,%r11,4) #70.5 + movaps %xmm14, 32(%r8,%r11,4) #70.5 + movaps %xmm4, 48(%r8,%r11,4) #70.5 + +#ifdef __APPLE__ + .globl _leaf_end +_leaf_end: +#else + .globl leaf_end +leaf_end: +#endif + +#ifdef __APPLE__ + .globl _x_init +_x_init: +#else + .globl x_init +x_init: +#endif + movaps (%rsi), %xmm3 #34.3 + movq 0x20(%rcx), %rdi +#ifdef __APPLE__ + .globl _x4 +_x4: +#else + .globl x4 +x4: +#endif + movaps 64(%r8), %xmm0 #34.3 + movaps 96(%r8), %xmm1 #34.3 + movaps (%r8), %xmm7 #34.3 + movaps (%rdi), %xmm4 #const + movaps %xmm7, %xmm9 #34.3 + movaps %xmm4, %xmm6 #34.3 + movaps 16(%rdi), %xmm2 #const + mulps %xmm0, %xmm6 #34.3 + mulps %xmm1, %xmm4 #34.3 + shufps $177, %xmm0, %xmm0 #34.3 + shufps $177, %xmm1, %xmm1 #34.3 + mulps %xmm2, %xmm0 #34.3 + mulps %xmm1, %xmm2 #34.3 + subps %xmm0, %xmm6 #34.3 + addps %xmm2, %xmm4 #34.3 + movaps %xmm6, %xmm5 #34.3 + subps %xmm4, %xmm6 #34.3 + addps %xmm4, %xmm5 #34.3 + movaps 32(%r8), %xmm8 #34.3 + xorps %xmm3, %xmm6 #34.3 + shufps $177, %xmm6, %xmm6 #34.3 + movaps %xmm8, %xmm10 #34.3 + movaps 112(%r8), %xmm12 #34.3 + subps %xmm5, %xmm9 #34.3 + addps %xmm5, %xmm7 #34.3 + addps %xmm6, %xmm10 #34.3 + subps %xmm6, %xmm8 #34.3 + movaps %xmm7, (%r8) #34.3 + movaps %xmm8, 32(%r8) #34.3 + movaps %xmm9, 64(%r8) #34.3 + movaps %xmm10, 96(%r8) #34.3 + movaps 32(%rdi), %xmm14 #const #34.3 + movaps 80(%r8), %xmm11 #34.3 + movaps %xmm14, %xmm0 #34.3 + movaps 48(%rdi), %xmm13 #const #34.3 + mulps %xmm11, %xmm0 #34.3 + mulps %xmm12, %xmm14 #34.3 + shufps $177, %xmm11, %xmm11 #34.3 + shufps $177, %xmm12, %xmm12 #34.3 + mulps %xmm13, %xmm11 #34.3 + mulps %xmm12, %xmm13 #34.3 + subps %xmm11, %xmm0 #34.3 + addps %xmm13, %xmm14 #34.3 + movaps %xmm0, %xmm15 #34.3 + subps %xmm14, %xmm0 #34.3 + addps %xmm14, %xmm15 #34.3 + xorps %xmm3, %xmm0 
#34.3 + movaps 16(%r8), %xmm1 #34.3 + movaps 48(%r8), %xmm2 #34.3 + movaps %xmm1, %xmm4 #34.3 + shufps $177, %xmm0, %xmm0 #34.3 + movaps %xmm2, %xmm5 #34.3 + addps %xmm15, %xmm1 #34.3 + subps %xmm0, %xmm2 #34.3 + subps %xmm15, %xmm4 #34.3 + addps %xmm0, %xmm5 #34.3 + movaps %xmm1, 16(%r8) #34.3 + movaps %xmm2, 48(%r8) #34.3 + movaps %xmm4, 80(%r8) #34.3 + movaps %xmm5, 112(%r8) #34.3 + ret + +# _x8_soft + 6 needs to be 16 byte aligned +#ifdef __APPLE__ + .globl _x8_soft +_x8_soft: +#else + .globl x8_soft +x8_soft: +#endif + # rax, rcx, rdx, r8, r10, r11 (r9 not used) + # rbx, rdi, rsi + + # input + movq %rdi, %rax + + # output + movq %r8, %rcx + + # loop stop (output + output_stride) + leaq (%r8, %rbx), %rdx + + # 3 * output_stride + leaq (%rbx, %rbx, 2), %rsi + + # 5 * output_stride + leaq (%rbx, %rbx, 4), %r10 + + # 7 * output_stride + leaq (%rsi, %rbx, 4), %r11 + +X8_soft_loop: + # input + 0 * input_stride + movaps (%rax), %xmm9 + + # output + 2 * output_stride + movaps (%rcx, %rbx, 2), %xmm6 + + movaps %xmm9, %xmm11 + + # output + 3 * output_stride + movaps (%rcx, %rsi), %xmm7 + + # input + 1 * input_stride + movaps 16(%rax), %xmm8 + + mulps %xmm6, %xmm11 + mulps %xmm7, %xmm9 + shufps $177, %xmm6, %xmm6 + mulps %xmm8, %xmm6 + shufps $177, %xmm7, %xmm7 + subps %xmm6, %xmm11 + mulps %xmm7, %xmm8 + movaps %xmm11, %xmm10 + addps %xmm8, %xmm9 + + # input + 2 * input_stride + movaps 32(%rax), %xmm15 + + addps %xmm9, %xmm10 + subps %xmm9, %xmm11 + + # output + 0 * output_stride + movaps (%rcx), %xmm5 + + movaps %xmm15, %xmm6 + + # output + 4 * output_stride + movaps (%rcx, %rbx, 4), %xmm12 + + movaps %xmm5, %xmm2 + + # output + 6 * output_stride + movaps (%rcx, %rsi, 2), %xmm13 + + xorps %xmm3, %xmm11 #const + + # input + 3 * input_stride + movaps 48(%rax), %xmm14 + + subps %xmm10, %xmm2 + mulps %xmm12, %xmm6 + addps %xmm10, %xmm5 + mulps %xmm13, %xmm15 + + # input + 4 * input_stride + movaps 64(%rax), %xmm10 + + movaps %xmm5, %xmm0 + shufps $177, %xmm12, %xmm12 + shufps $177, %xmm13, %xmm13 + mulps %xmm14, %xmm12 + mulps %xmm13, %xmm14 + subps %xmm12, %xmm6 + addps %xmm14, %xmm15 + + # output + 5 * output_stride + movaps (%rcx, %r10), %xmm7 + + movaps %xmm10, %xmm13 + + # output + 7 * output_stride + movaps (%rcx, %r11), %xmm8 + + movaps %xmm6, %xmm12 + + # input + 5 * input_stride + movaps 80(%rax), %xmm9 + + # input + 6 * input_stride + addq $96, %rax + + mulps %xmm7, %xmm13 + subps %xmm15, %xmm6 + addps %xmm15, %xmm12 + mulps %xmm8, %xmm10 + subps %xmm12, %xmm0 + addps %xmm12, %xmm5 + shufps $177, %xmm7, %xmm7 + xorps %xmm3, %xmm6 #const + shufps $177, %xmm8, %xmm8 + movaps %xmm2, %xmm12 + mulps %xmm9, %xmm7 + mulps %xmm8, %xmm9 + subps %xmm7, %xmm13 + addps %xmm9, %xmm10 + + # output + 1 * output_stride + movaps (%rcx, %rbx), %xmm4 + + shufps $177, %xmm11, %xmm11 + movaps %xmm4, %xmm1 + shufps $177, %xmm6, %xmm6 + addps %xmm11, %xmm1 + subps %xmm11, %xmm4 + addps %xmm6, %xmm12 + subps %xmm6, %xmm2 + movaps %xmm13, %xmm11 + movaps %xmm4, %xmm14 + movaps %xmm1, %xmm6 + subps %xmm10, %xmm13 + addps %xmm10, %xmm11 + xorps %xmm3, %xmm13 #const + addps %xmm11, %xmm4 + subps %xmm11, %xmm14 + shufps $177, %xmm13, %xmm13 + + # output + 0 * output_stride + movaps %xmm5, (%rcx) + + # output + 1 * output_stride + movaps %xmm4, (%rcx, %rbx) + + # output + 2 * output_stride + movaps %xmm2, (%rcx, %rbx, 2) + + subps %xmm13, %xmm1 + addps %xmm13, %xmm6 + + # output + 3 * output_stride + movaps %xmm1, (%rcx, %rsi) + + # output + 4 * output_stride + movaps %xmm0, (%rcx, %rbx, 4) + + # output + 5 * 
output_stride + movaps %xmm14, (%rcx, %r10) + + # output + 6 * output_stride + movaps %xmm12, (%rcx, %rsi, 2) + + # output + 7 * output_stride + movaps %xmm6, (%rcx, %r11) + + # output + 8 * output_stride + addq $16, %rcx + + cmpq %rdx, %rcx + jne X8_soft_loop + ret + +#ifdef __APPLE__ + .globl _x8_soft_end +_x8_soft_end: +#else + .globl x8_soft_end +x8_soft_end: + +#ifdef __APPLE__ + .globl _sse_leaf_ee_offsets + .globl _sse_leaf_oo_offsets + .globl _sse_leaf_eo_offsets + .globl _sse_leaf_oe_offsets + .align 4 +_sse_leaf_ee_offsets: + .long LEAF_EE_const_0-_leaf_ee+0x4 + .long LEAF_EE_const_1-_leaf_ee+0x5 + .long LEAF_EE_const_2-_leaf_ee+0x5 + .long LEAF_EE_const_3-_leaf_ee+0x5 + .long LEAF_EE_const_4-_leaf_ee+0x5 + .long LEAF_EE_const_5-_leaf_ee+0x5 + .long LEAF_EE_const_6-_leaf_ee+0x4 + .long LEAF_EE_const_7-_leaf_ee+0x5 +_sse_leaf_oo_offsets: + .long LEAF_OO_const_0-_leaf_oo+0x4 + .long LEAF_OO_const_1-_leaf_oo+0x4 + .long LEAF_OO_const_2-_leaf_oo+0x5 + .long LEAF_OO_const_3-_leaf_oo+0x5 + .long LEAF_OO_const_4-_leaf_oo+0x4 + .long LEAF_OO_const_5-_leaf_oo+0x5 + .long LEAF_OO_const_6-_leaf_oo+0x5 + .long LEAF_OO_const_7-_leaf_oo+0x5 +_sse_leaf_eo_offsets: + .long LEAF_EO_const_0-_leaf_eo+0x5 + .long LEAF_EO_const_1-_leaf_eo+0x4 + .long LEAF_EO_const_2-_leaf_eo+0x4 + .long LEAF_EO_const_3-_leaf_eo+0x4 + .long LEAF_EO_const_4-_leaf_eo+0x5 + .long LEAF_EO_const_5-_leaf_eo+0x5 + .long LEAF_EO_const_6-_leaf_eo+0x4 + .long LEAF_EO_const_7-_leaf_eo+0x5 +_sse_leaf_oe_offsets: + .long LEAF_OE_const_0-_leaf_oe+0x5 + .long LEAF_OE_const_1-_leaf_oe+0x4 + .long LEAF_OE_const_2-_leaf_oe+0x4 + .long LEAF_OE_const_3-_leaf_oe+0x5 + .long LEAF_OE_const_4-_leaf_oe+0x5 + .long LEAF_OE_const_5-_leaf_oe+0x5 + .long LEAF_OE_const_6-_leaf_oe+0x4 + .long LEAF_OE_const_7-_leaf_oe+0x4 +#else + .globl sse_leaf_ee_offsets + .globl sse_leaf_oo_offsets + .globl sse_leaf_eo_offsets + .globl sse_leaf_oe_offsets + .align 4 +sse_leaf_ee_offsets: + .long LEAF_EE_const_0-leaf_ee+0x4 + .long LEAF_EE_const_1-leaf_ee+0x5 + .long LEAF_EE_const_2-leaf_ee+0x5 + .long LEAF_EE_const_3-leaf_ee+0x5 + .long LEAF_EE_const_4-leaf_ee+0x5 + .long LEAF_EE_const_5-leaf_ee+0x5 + .long LEAF_EE_const_6-leaf_ee+0x4 + .long LEAF_EE_const_7-leaf_ee+0x5 +sse_leaf_oo_offsets: + .long LEAF_OO_const_0-leaf_oo+0x4 + .long LEAF_OO_const_1-leaf_oo+0x4 + .long LEAF_OO_const_2-leaf_oo+0x5 + .long LEAF_OO_const_3-leaf_oo+0x5 + .long LEAF_OO_const_4-leaf_oo+0x4 + .long LEAF_OO_const_5-leaf_oo+0x5 + .long LEAF_OO_const_6-leaf_oo+0x5 + .long LEAF_OO_const_7-leaf_oo+0x5 +sse_leaf_eo_offsets: + .long LEAF_EO_const_0-leaf_eo+0x5 + .long LEAF_EO_const_1-leaf_eo+0x4 + .long LEAF_EO_const_2-leaf_eo+0x4 + .long LEAF_EO_const_3-leaf_eo+0x4 + .long LEAF_EO_const_4-leaf_eo+0x5 + .long LEAF_EO_const_5-leaf_eo+0x5 + .long LEAF_EO_const_6-leaf_eo+0x4 + .long LEAF_EO_const_7-leaf_eo+0x5 +sse_leaf_oe_offsets: + .long LEAF_OE_const_0-leaf_oe+0x5 + .long LEAF_OE_const_1-leaf_oe+0x4 + .long LEAF_OE_const_2-leaf_oe+0x4 + .long LEAF_OE_const_3-leaf_oe+0x5 + .long LEAF_OE_const_4-leaf_oe+0x5 + .long LEAF_OE_const_5-leaf_oe+0x5 + .long LEAF_OE_const_6-leaf_oe+0x4 + .long LEAF_OE_const_7-leaf_oe+0x4 +#endif + +#ifdef __APPLE__ + .data +#else + .section .data +#endif + .p2align 4 +#ifdef __APPLE__ + .globl _sse_constants +_sse_constants: +#else + .globl sse_constants +sse_constants: +#endif + .long 0x00000000,0x80000000,0x00000000,0x80000000 + .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 + .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3 + .long 
0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3
+	.long	0x00000000,0x00000000,0xbf3504f3,0x3f3504f3
+#ifdef __APPLE__
+	.globl	_sse_constants_inv
+_sse_constants_inv:
+#else
+	.globl	sse_constants_inv
+sse_constants_inv:
+#endif
+	.long	0x80000000,0x00000000,0x80000000,0x00000000
+	.long	0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3
+	.long	0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3
+	.long	0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3
+	.long	0x00000000,0x00000000,0x3f3504f3,0xbf3504f3
-- 
cgit v1.1

From 8011f28cf0cb592251f5e95accbd8cf21e28f0c7 Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Fri, 31 Oct 2014 18:03:41 +0200
Subject: Add building instructions for Windows x64

---
 README | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README b/README
index d2f320b..28224a5 100644
--- a/README
+++ b/README
@@ -10,6 +10,13 @@ To build for Linux or OS X on x86, run
     make
     make install
 
+To build for Windows x64 with MSVC 2005 and YASM v1.3, run
+    mkdir build
+    cd build
+    cmake .. -G "Visual Studio 8 2005 Win64"
+
+Note that 32 bit Windows is not supported at the moment.
+
 FFTS dynamically generates code at runtime. This can be disabled with
 --disable-dynamic-code

-- 
cgit v1.1

From b4ec2061aab28f7cc626f36a3d8324eebeaab88a Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Sat, 1 Nov 2014 11:16:56 +0200
Subject: XMM6:XMM15 Nonvolatile, must be preserved as needed by callee.

http://msdn.microsoft.com/en-us/library/9z1stfyw(v=vs.80).aspx
---
 CMakeLists.txt    |  24 ++++---
 src/codegen.c     | 206 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 src/codegen_sse.h |  34 +++++++--
 src/sse_win64.s   |  14 ++--
 4 files changed, 252 insertions(+), 26 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 365ec32..548a462 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -31,15 +31,16 @@
 option(ENABLE_SHARED
     "Enable building a shared library."
OFF ) -#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -pipe -Wall") add_definitions(-DFFTS_CMAKE_GENERATED) -include(CheckIncludeFile) -include(CheckLibraryExists) - if(MSVC) add_definitions(-D_USE_MATH_DEFINES) else() + include(CheckIncludeFile) + include(CheckLibraryExists) + + #set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -pipe -Wall") + # some systems need libm for some of the math functions to work check_library_exists(m pow "" HAVE_LIBM) if(HAVE_LIBM) @@ -69,6 +70,9 @@ set(FFTS_SOURCES ) if(ENABLE_SSE) + add_definitions(-DHAVE_SSE) + add_definitions(-D__x86_64__) + list(APPEND FFTS_SOURCES src/macros-sse.h ) @@ -95,11 +99,9 @@ if(ENABLE_SSE) list(APPEND FFTS_SOURCES src/sse.s ) + else() + add_definitions(-msse2) endif(MSVC) - - add_definitions(-D_USE_MATH_DEFINES) - add_definitions(-D__x86_64__) - add_definitions(-DHAVE_SSE -msse2) endif() if(ENABLE_NEON) @@ -142,6 +144,12 @@ else() src/codegen.c src/codegen.h ) + + if(ENABLE_SSE) + list(APPEND FFTS_SOURCES + src/codegen_sse.h + ) + endif(ENABLE_SSE) endif() add_library(ffts_static diff --git a/src/codegen.c b/src/codegen.c index 0cc3d24..af5deda 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -385,6 +385,104 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N *fp++ = 0x7C; *fp++ = 0x24; *fp++ = 0x18; + + /* reserve space for XMM6-XMM15 registers*/ + + /* sub rsp, 168 */ + SUBI(&fp, RSP, 168); + + /* movdqa [rsp], xmm6 */ + *fp++ = 0x66; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x34; + *fp++ = 0x24; + + /* movdqa [rsp + 16], xmm7 */ + *fp++ = 0x66; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x7C; + *fp++ = 0x24; + *fp++ = 0x10; + + /* movdqa [rsp + 32], xmm8 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x44; + *fp++ = 0x24; + *fp++ = 0x20; + + /* movdqa [rsp + 48], xmm9 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x4C; + *fp++ = 0x24; + *fp++ = 0x30; + + /* movdqa [rsp + 64], xmm10 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x54; + *fp++ = 0x24; + *fp++ = 0x40; + + /* movdqa [rsp + 80], xmm11 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x5C; + *fp++ = 0x24; + *fp++ = 0x50; + + /* movdqa [rsp + 96], xmm12 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x64; + *fp++ = 0x24; + *fp++ = 0x60; + + /* movdqa [rsp + 112], xmm13 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0x6C; + *fp++ = 0x24; + *fp++ = 0x70; + + /* movdqa [rsp + 128], xmm14 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0xB4; + *fp++ = 0x24; + *fp++ = 0x80; + *fp++ = 0x00; + *fp++ = 0x00; + *fp++ = 0x00; + + /* movdqa [rsp + 144], xmm15 */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x7F; + *fp++ = 0xBC; + *fp++ = 0x24; + *fp++ = 0x90; + *fp++ = 0x00; + *fp++ = 0x00; + *fp++ = 0x00; #else PUSH(&fp, RBP); PUSH(&fp, RBX); @@ -575,11 +673,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += len; } - //fprintf(stderr, "Body start address = %016p\n", fp); - //LEA(&fp, R8, RDI, ((uint32_t)&p->ws) - ((uint32_t)p)); - memcpy(fp, x_init, (char*) x4 - (char*) x_init); - //IMM32_NI(fp + 3, ((int64_t)READ_IMM32(fp + 3)) + ((void *)x_init - (void *)fp )); - fp += ((char*) x4 - (char*) x_init); + assert((char*) x4 > (char*) x_init); + len = (char*) x4 - (char*) x_init; + memcpy(fp, x_init, len); + fp += len; count = 2; while (pps[0]) { @@ -927,7 +1024,104 @@ transform_func_t 
ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* restore nonvolatile registers */ #ifdef _M_AMD64 - /* mov rbx, [rsp + 8] */ + + /* movdqa xmm6, [rsp] */ + *fp++ = 0x66; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x34; + *fp++ = 0x24; + + /* movdqa xmm7, [rsp + 16] */ + *fp++ = 0x66; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x7C; + *fp++ = 0x24; + *fp++ = 0x10; + + /* movdqa xmm8, [rsp + 32] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x44; + *fp++ = 0x24; + *fp++ = 0x20; + + /* movdqa xmm9, [rsp + 48] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x4C; + *fp++ = 0x24; + *fp++ = 0x30; + + /* movdqa xmm10, [rsp + 64] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x54; + *fp++ = 0x24; + *fp++ = 0x40; + + /* movdqa xmm11, [rsp + 80] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x5C; + *fp++ = 0x24; + *fp++ = 0x50; + + /* movdqa xmm12, [rsp + 96] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x64; + *fp++ = 0x24; + *fp++ = 0x60; + + /* movdqa xmm13 , [rsp + 112] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0x6C; + *fp++ = 0x24; + *fp++ = 0x70; + + /* movdqa xmm14, [rsp + 128] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0xB4; + *fp++ = 0x24; + *fp++ = 0x80; + *fp++ = 0x00; + *fp++ = 0x00; + *fp++ = 0x00; + + /* movdqa xmm15, [rsp + 144] */ + *fp++ = 0x66; + *fp++ = 0x44; + *fp++ = 0x0F; + *fp++ = 0x6F; + *fp++ = 0xBC; + *fp++ = 0x24; + *fp++ = 0x90; + *fp++ = 0x00; + *fp++ = 0x00; + *fp++ = 0x00; + + /* add rsp, 168 */ + ADDI(&fp, RSP, 168); + + /* mov rbx, [rsp + 8] */ *fp++ = 0x48; *fp++ = 0x8B; *fp++ = 0x5C; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index a63d21d..269d142 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -31,7 +31,6 @@ */ - #ifndef FFTS_CODEGEN_SSE_H #define FFTS_CODEGEN_SSE_H @@ -63,19 +62,21 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define ECX 1 #define EDX 2 #define EBX 3 +#define ESP 4 +#define EBP 5 #define ESI 6 #define EDI 7 -#define EBP 5 #define RAX 0 #define RCX 1 #define RDX 2 #define RBX 3 +#define RSP 4 +#define RBP 5 #define RSI 6 #define RDI 7 -#define RBP 5 -#define R8 8 -#define R9 9 +#define R8 8 +#define R9 9 #define R10 10 #define R11 11 #define R12 12 @@ -185,6 +186,29 @@ void ADDI(uint8_t **p, uint8_t dst, int32_t imm) } } +void SUBI(uint8_t **p, uint8_t dst, int32_t imm) +{ + if (dst >= 8) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } + + if (imm > 127 || imm <= -128) { + *(*p)++ = 0x81; + } else { + *(*p)++ = 0x83; + } + + *(*p)++ = 0xe8 | (dst & 0x7); + + if (imm > 127 || imm <= -128) { + IMM32(p, imm); + } else { + IMM8(p, imm); + } +} + void CALL(uint8_t **p, uint8_t *func) { *(*p)++ = 0xe8; diff --git a/src/sse_win64.s b/src/sse_win64.s index 6b75391..2aa76cd 100644 --- a/src/sse_win64.s +++ b/src/sse_win64.s @@ -491,20 +491,20 @@ x4: addps %xmm2, %xmm4 #34.3 movaps %xmm6, %xmm5 #34.3 subps %xmm4, %xmm6 #34.3 - addps %xmm4, %xmm5 #34.3 - movaps 32(%r8), %xmm8 #34.3 + addps %xmm4, %xmm5 #34.3 + movaps 32(%r8), %xmm8 #34.3 xorps %xmm3, %xmm6 #34.3 - shufps $177, %xmm6, %xmm6 #34.3 + shufps $177, %xmm6, %xmm6 #34.3 movaps %xmm8, %xmm10 #34.3 - movaps 112(%r8), %xmm12 #34.3 + movaps 112(%r8), %xmm12 #34.3 subps %xmm5, %xmm9 #34.3 addps %xmm5, %xmm7 #34.3 addps %xmm6, %xmm10 #34.3 - subps %xmm6, %xmm8 #34.3 + subps %xmm6, %xmm8 #34.3 movaps %xmm7, (%r8) #34.3 movaps %xmm8, 32(%r8) 
#34.3 movaps %xmm9, 64(%r8) #34.3 - movaps %xmm10, 96(%r8) #34.3 + movaps %xmm10, 96(%r8) #34.3 movaps 32(%rdi), %xmm14 #const #34.3 movaps 80(%r8), %xmm11 #34.3 movaps %xmm14, %xmm0 #34.3 @@ -516,7 +516,7 @@ x4: mulps %xmm13, %xmm11 #34.3 mulps %xmm12, %xmm13 #34.3 subps %xmm11, %xmm0 #34.3 - addps %xmm13, %xmm14 #34.3 + addps %xmm13, %xmm14 #34.3 movaps %xmm0, %xmm15 #34.3 subps %xmm14, %xmm0 #34.3 addps %xmm14, %xmm15 #34.3 -- cgit v1.1 From 14c88113349263fafc88a671a71facca7e177dc9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 3 Nov 2014 10:46:09 +0200 Subject: MOVDQA "intrinsic", two operand MOVDQA2, three operand MOVDQA3 helpers --- src/codegen.c | 217 +++++++----------------------------------------------- src/codegen_sse.h | 126 +++++++++++++++++++++++++++---- 2 files changed, 136 insertions(+), 207 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index af5deda..4e524ca 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -386,103 +386,19 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N *fp++ = 0x24; *fp++ = 0x18; - /* reserve space for XMM6-XMM15 registers*/ - - /* sub rsp, 168 */ + /* reserve space to save XMM6-XMM15 registers */ SUBI(&fp, RSP, 168); - /* movdqa [rsp], xmm6 */ - *fp++ = 0x66; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x34; - *fp++ = 0x24; - - /* movdqa [rsp + 16], xmm7 */ - *fp++ = 0x66; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x7C; - *fp++ = 0x24; - *fp++ = 0x10; - - /* movdqa [rsp + 32], xmm8 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x44; - *fp++ = 0x24; - *fp++ = 0x20; - - /* movdqa [rsp + 48], xmm9 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x4C; - *fp++ = 0x24; - *fp++ = 0x30; - - /* movdqa [rsp + 64], xmm10 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x54; - *fp++ = 0x24; - *fp++ = 0x40; - - /* movdqa [rsp + 80], xmm11 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x5C; - *fp++ = 0x24; - *fp++ = 0x50; - - /* movdqa [rsp + 96], xmm12 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x64; - *fp++ = 0x24; - *fp++ = 0x60; - - /* movdqa [rsp + 112], xmm13 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0x6C; - *fp++ = 0x24; - *fp++ = 0x70; - - /* movdqa [rsp + 128], xmm14 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0xB4; - *fp++ = 0x24; - *fp++ = 0x80; - *fp++ = 0x00; - *fp++ = 0x00; - *fp++ = 0x00; - - /* movdqa [rsp + 144], xmm15 */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x7F; - *fp++ = 0xBC; - *fp++ = 0x24; - *fp++ = 0x90; - *fp++ = 0x00; - *fp++ = 0x00; - *fp++ = 0x00; + MOVDQA3(&fp, RSP, 0, XMM6); + MOVDQA3(&fp, RSP, 16, XMM7); + MOVDQA3(&fp, RSP, 32, XMM8); + MOVDQA3(&fp, RSP, 48, XMM9); + MOVDQA3(&fp, RSP, 64, XMM10); + MOVDQA3(&fp, RSP, 80, XMM11); + MOVDQA3(&fp, RSP, 96, XMM12); + MOVDQA3(&fp, RSP, 112, XMM13); + MOVDQA3(&fp, RSP, 128, XMM14); + MOVDQA3(&fp, RSP, 144, XMM15); #else PUSH(&fp, RBP); PUSH(&fp, RBX); @@ -1022,105 +938,24 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N count++; #else - /* restore nonvolatile registers */ #ifdef _M_AMD64 - - /* movdqa xmm6, [rsp] */ - *fp++ = 0x66; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x34; - *fp++ = 0x24; - - /* movdqa xmm7, [rsp + 16] */ - *fp++ = 0x66; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x7C; - *fp++ = 0x24; - *fp++ = 0x10; - - /* movdqa xmm8, [rsp + 32] */ - *fp++ = 0x66; 
- *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x44; - *fp++ = 0x24; - *fp++ = 0x20; - - /* movdqa xmm9, [rsp + 48] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x4C; - *fp++ = 0x24; - *fp++ = 0x30; - - /* movdqa xmm10, [rsp + 64] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x54; - *fp++ = 0x24; - *fp++ = 0x40; - - /* movdqa xmm11, [rsp + 80] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x5C; - *fp++ = 0x24; - *fp++ = 0x50; - - /* movdqa xmm12, [rsp + 96] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x64; - *fp++ = 0x24; - *fp++ = 0x60; - - /* movdqa xmm13 , [rsp + 112] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0x6C; - *fp++ = 0x24; - *fp++ = 0x70; - - /* movdqa xmm14, [rsp + 128] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0xB4; - *fp++ = 0x24; - *fp++ = 0x80; - *fp++ = 0x00; - *fp++ = 0x00; - *fp++ = 0x00; - - /* movdqa xmm15, [rsp + 144] */ - *fp++ = 0x66; - *fp++ = 0x44; - *fp++ = 0x0F; - *fp++ = 0x6F; - *fp++ = 0xBC; - *fp++ = 0x24; - *fp++ = 0x90; - *fp++ = 0x00; - *fp++ = 0x00; - *fp++ = 0x00; - - /* add rsp, 168 */ + /* restore nonvolatile registers */ + MOVDQA3(&fp, XMM6, RSP, 0); + MOVDQA3(&fp, XMM7, RSP, 16); + MOVDQA3(&fp, XMM8, RSP, 32); + MOVDQA3(&fp, XMM9, RSP, 48); + MOVDQA3(&fp, XMM10, RSP, 64); + MOVDQA3(&fp, XMM11, RSP, 80); + MOVDQA3(&fp, XMM12, RSP, 96); + MOVDQA3(&fp, XMM13, RSP, 112); + MOVDQA3(&fp, XMM14, RSP, 128); + MOVDQA3(&fp, XMM15, RSP, 144); + + /* restore stack */ ADDI(&fp, RSP, 168); + /* restore the last 3 registers from the shadow space */ + /* mov rbx, [rsp + 8] */ *fp++ = 0x48; *fp++ = 0x8B; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 269d142..6690b92 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -34,6 +34,8 @@ #ifndef FFTS_CODEGEN_SSE_H #define FFTS_CODEGEN_SSE_H +#include + void neon_x4(float *, size_t, float *); void neon_x8(float *, size_t, float *); void neon_x8_t(float *, size_t, float *); @@ -84,12 +86,31 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define R14 14 #define R15 15 -void IMM8(uint8_t **p, int32_t imm) +#define XMM_REG 0x40 + +#define XMM0 (XMM_REG | 0x0) +#define XMM1 (XMM_REG | 0x1) +#define XMM2 (XMM_REG | 0x2) +#define XMM3 (XMM_REG | 0x3) +#define XMM4 (XMM_REG | 0x4) +#define XMM5 (XMM_REG | 0x5) +#define XMM6 (XMM_REG | 0x6) +#define XMM7 (XMM_REG | 0x7) +#define XMM8 (XMM_REG | 0x8) +#define XMM9 (XMM_REG | 0x9) +#define XMM10 (XMM_REG | 0xa) +#define XMM11 (XMM_REG | 0xb) +#define XMM12 (XMM_REG | 0xc) +#define XMM13 (XMM_REG | 0xd) +#define XMM14 (XMM_REG | 0xe) +#define XMM15 (XMM_REG | 0xf) + +static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); } -void IMM16(uint8_t **p, int32_t imm) +static void IMM16(uint8_t **p, int32_t imm) { int i; @@ -98,7 +119,7 @@ void IMM16(uint8_t **p, int32_t imm) } } -void IMM32(uint8_t **p, int32_t imm) +static void IMM32(uint8_t **p, int32_t imm) { int i; @@ -107,7 +128,7 @@ void IMM32(uint8_t **p, int32_t imm) } } -void IMM32_NI(uint8_t *p, int32_t imm) +static void IMM32_NI(uint8_t *p, int32_t imm) { int i; @@ -116,7 +137,7 @@ void IMM32_NI(uint8_t *p, int32_t imm) } } -int32_t READ_IMM32(uint8_t *p) +static int32_t READ_IMM32(uint8_t *p) { int32_t rval = 0; int i; @@ -128,7 +149,7 @@ int32_t READ_IMM32(uint8_t *p) return rval; } -void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) +static void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) { 
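 	/*
 	 * Encoding sketch: 0x48 is the REX.W prefix and 0x49 is REX.W|REX.B
 	 * (REX.B extends the register field for R8-R15); opcode 0x81 /5 takes
 	 * an imm32 while 0x83 /5 takes a sign-extended imm8; the ModRM byte
 	 * 0xE8 | (dst & 7) selects the register-direct SUB form.
 	 */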
if (dst >= 8) { *(*p)++ = 0x41; @@ -138,7 +159,7 @@ void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) IMM32(p, imm); } -void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) +static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) { if (disp == 0) { *(*p)++ = (rm & 7) | ((reg & 7) << 3); @@ -151,19 +172,93 @@ void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) } } -void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) +static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; + + *(*p)++ = 0x66; + + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + *(*p)++ = 0x0F; + + if (is_store) { + *(*p)++ = 0x7F; + } else { + *(*p)++ = 0x6F; + } + + r = r1 | (r2 << 3); + + if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { + assert(disp == 0); + *(*p)++ = 0xC0 | r; + } else { + assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); + + if (disp == 0 && r1 != 5) { + *(*p)++ = r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + } else { + if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + + IMM32(p, disp); + } + } + } +} + +static FFTS_INLINE void MOVDQA2(uint8_t **p, uint8_t reg1, uint8_t reg2) +{ + if (reg1 & XMM_REG) { + MOVDQA(p, reg2, reg1, 0, 0); + } else { + MOVDQA(p, reg1, reg2, 0, 1); + } +} + +static FFTS_INLINE void MOVDQA3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) +{ + if (reg1 & XMM_REG) { + MOVDQA(p, (uint8_t) op2, reg1, op3, 0); + } else { + MOVDQA(p, reg1, (uint8_t) op3, op2, 1); + } +} + +static void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) { *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); *(*p)++ = 0x8d; ADDRMODE(p, dst, base, disp); } -void RET(uint8_t **p) +static void RET(uint8_t **p) { *(*p)++ = 0xc3; } -void ADDI(uint8_t **p, uint8_t dst, int32_t imm) +static void ADDI(uint8_t **p, uint8_t dst, int32_t imm) { if (dst >= 8) { *(*p)++ = 0x49; @@ -186,7 +281,7 @@ void ADDI(uint8_t **p, uint8_t dst, int32_t imm) } } -void SUBI(uint8_t **p, uint8_t dst, int32_t imm) +static void SUBI(uint8_t **p, uint8_t dst, int32_t imm) { if (dst >= 8) { *(*p)++ = 0x49; @@ -209,13 +304,13 @@ void SUBI(uint8_t **p, uint8_t dst, int32_t imm) } } -void CALL(uint8_t **p, uint8_t *func) +static void CALL(uint8_t **p, uint8_t *func) { *(*p)++ = 0xe8; IMM32(p, func - *p - 4); } -void PUSH(uint8_t **p, uint8_t reg) +static void PUSH(uint8_t **p, uint8_t reg) { if (reg >= 8) { *(*p)++ = 0x41; @@ -224,7 +319,7 @@ void PUSH(uint8_t **p, uint8_t reg) *(*p)++ = 0x50 | (reg & 7); } -void POP(uint8_t **p, uint8_t reg) +static void POP(uint8_t **p, uint8_t reg) { if (reg >= 8) { *(*p)++ = 0x41; @@ -233,7 +328,7 @@ void POP(uint8_t **p, uint8_t reg) *(*p)++ = 0x58 | (reg & 7); } -void SHIFT(uint8_t **p, uint8_t reg, int shift) +static void SHIFT(uint8_t **p, uint8_t reg, int shift) { if (reg >= 8) { *(*p)++ = 0x49; @@ -250,5 +345,4 @@ void SHIFT(uint8_t **p, uint8_t reg, int shift) } } - #endif /* FFTS_CODEGEN_SSE_H */ \ No newline at end of file -- cgit v1.1 From 0f7c426a663af998e92f586820fd2ca561aafb68 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 4 Nov 2014 12:45:27 +0200 Subject: Refactor generate_func_code --- src/codegen.c | 342 +++--------------------------------------------------- src/codegen_arm.h | 123 ++++++++++++++++++++ 
src/codegen_sse.h | 242 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 378 insertions(+), 329 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 4e524ca..36fdf8d 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -35,6 +35,12 @@ #include "macros.h" #include "ffts.h" +#ifdef __arm__ +typedef uint32_t insns_t; +#else +typedef uint8_t insns_t; +#endif + #ifdef HAVE_NEON #include "codegen_arm.h" #include "neon.h" @@ -57,14 +63,6 @@ #include #endif -#ifdef __arm__ -typedef uint32_t insns_t; -#else -typedef uint8_t insns_t; -#endif - -#define P(x) (*(*p)++ = x) - static int ffts_tree_count(int N, int leaf_N, int offset) { int count; @@ -100,91 +98,6 @@ static void ffts_elaborate_tree(size_t **p, int N, int leaf_N, int offset) (*p) += 2; } -static void ffts_insert_nops(uint8_t **p, uint32_t count) -{ - if (count >= 9) { - P(0x66); - P(0x0F); - P(0x1F); - P(0x84); - P(0x00); - P(0x00); - P(0x00); - P(0x00); - P(0x00); - - if (count > 9) { - ffts_insert_nops(p, count - 9); - } - } else { - switch(count) { - case 0: - break; - case 2: - P(0x66); - /* fall through */ - case 1: - P(0x90); - break; - case 3: - P(0x0F); - P(0x1F); - P(0x00); - break; - case 4: - P(0x0F); - P(0x1F); - P(0x40); - P(0x00); - break; - case 5: - P(0x0F); - P(0x1F); - P(0x44); - P(0x00); - P(0x00); - break; - case 6: - P(0x66); - P(0x0F); - P(0x1F); - P(0x44); - P(0x00); - P(0x00); - break; - case 7: - P(0x0F); - P(0x1F); - P(0x80); - P(0x00); - P(0x00); - P(0x00); - P(0x00); - break; - case 8: - default: - P(0x0F); - P(0x1F); - P(0x84); - P(0x00); - P(0x00); - P(0x00); - P(0x00); - P(0x00); - break; - } - } -} - -static void ffts_align_mem16(uint8_t **p, uint32_t offset) -{ -#ifdef __x86_64__ - int r = (16 - (offset & 0xf)) - ((uintptr_t)(*p) & 0xf); - r = (16 + r) & 0xf; - ffts_insert_nops(p, r); -#endif -} - transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { uint32_t offsets[8] = {0, N, N/2, 3*N/2, N/4, 5*N/4, 7*N/4, 3*N/4}; @@ -231,187 +144,17 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp = (insns_t*) p->transform_base; -#ifdef __arm__ -#ifdef HAVE_NEON - memcpy(fp, neon_x8, neon_x8_t - neon_x8); - /* - * Changes adds to subtracts and vice versa to allow the computation - * of both the IFFT and FFT - */ - if(sign < 0) { - fp[31] ^= 0x00200000; - fp[32] ^= 0x00200000; - fp[33] ^= 0x00200000; - fp[34] ^= 0x00200000; - fp[65] ^= 0x00200000; - fp[66] ^= 0x00200000; - fp[70] ^= 0x00200000; - fp[74] ^= 0x00200000; - fp[97] ^= 0x00200000; - fp[98] ^= 0x00200000; - fp[102] ^= 0x00200000; - fp[104] ^= 0x00200000; - } - fp += (neon_x8_t - neon_x8) / 4; -#else - memcpy(fp, vfp_x8, vfp_end - vfp_x8); - if(sign > 0) { - fp[65] ^= 0x00000040; - fp[66] ^= 0x00000040; - fp[68] ^= 0x00000040; - fp[70] ^= 0x00000040; - fp[103] ^= 0x00000040; - fp[104] ^= 0x00000040; - fp[105] ^= 0x00000040; - fp[108] ^= 0x00000040; - fp[113] ^= 0x00000040; - fp[114] ^= 0x00000040; - fp[117] ^= 0x00000040; - fp[118] ^= 0x00000040; - } - fp += (vfp_end - vfp_x8) / 4; -#endif -#else - /* align call destination */ - ffts_align_mem16(&fp, 0); - x_8_addr = fp; - - /* align loop/jump destination */ -#ifdef _M_AMD64 - ffts_align_mem16(&fp, 6); -#else - ffts_align_mem16(&fp, 5); -#endif - - /* copy function */ - assert((char*) x8_soft_end > (char*) x8_soft); - len = (char*) x8_soft_end - (char*) x8_soft; - memcpy(fp, x8_soft, (size_t) len); - fp += len; -#endif - //uint32_t *x_8_t_addr = fp; - //memcpy(fp, neon_x8_t, neon_end - neon_x8_t); - //fp += (neon_end - 
neon_x8_t) / 4; + /* generate base cases */ + x_4_addr = generate_size4_base_case(&fp, sign); + x_8_addr = generate_size8_base_case(&fp, sign); #ifdef __arm__ -#ifdef HAVE_NEON - memcpy(fp, neon_x4, neon_x8 - neon_x4); - if(sign < 0) { - fp[26] ^= 0x00200000; - fp[28] ^= 0x00200000; - fp[31] ^= 0x00200000; - fp[32] ^= 0x00200000; - } - fp += (neon_x8 - neon_x4) / 4; -#else - memcpy(fp, vfp_x4, vfp_x8 - vfp_x4); - if(sign > 0) { - fp[36] ^= 0x00000040; - fp[38] ^= 0x00000040; - fp[43] ^= 0x00000040; - fp[44] ^= 0x00000040; - } - fp += (vfp_x8 - vfp_x4) / 4; -#endif + start = generate_prologue(&fp, p); #else - /* align call destination */ - ffts_align_mem16(&fp, 0); - x_4_addr = fp; - - /* copy function */ - assert((char*) x8_soft > (char*) x4); - len = (char*) x8_soft - (char*) x4; - memcpy(fp, x4, (size_t) len); - fp += len; -#endif - -#ifdef __arm__ - start = fp; - - *fp = PUSH_LR(); - fp++; - *fp = 0xed2d8b10; - fp++; - - ADDI(&fp, 3, 1, 0); - ADDI(&fp, 7, 1, N); - ADDI(&fp, 5, 1, 2*N); - ADDI(&fp, 10, 7, 2*N); - ADDI(&fp, 4, 5, 2*N); - ADDI(&fp, 8, 10, 2*N); - ADDI(&fp, 6, 4, 2*N); - ADDI(&fp, 9, 8, 2*N); - - *fp = LDRI(12, 0, ((uint32_t)&p->offsets) - ((uint32_t)p)); - fp++; // load offsets into r12 - // *fp++ = LDRI(1, 0, 4); // load ws into r1 - ADDI(&fp, 1, 0, 0); - - ADDI(&fp, 0, 2, 0), // mov out into r0 - *fp = LDRI(2, 1, ((uint32_t)&p->ee_ws) - ((uint32_t)p)); - fp++; - -#ifdef HAVE_NEON - MOVI(&fp, 11, p->i0); -#else - MOVI(&fp, 11, p->i0); -#endif -#else - /* align call destination */ - ffts_align_mem16(&fp, 0); - start = fp; - - /* save nonvolatile registers */ -#ifdef _M_AMD64 - /* use the shadow space to save first 3 registers */ - - /* mov [rsp + 8], rbx */ - *fp++ = 0x48; - *fp++ = 0x89; - *fp++ = 0x5C; - *fp++ = 0x24; - *fp++ = 0x08; - - /* mov [rsp + 16], rsi */ - *fp++ = 0x48; - *fp++ = 0x89; - *fp++ = 0x74; - *fp++ = 0x24; - *fp++ = 0x10; - - /* mov [rsp + 24], rdi */ - *fp++ = 0x48; - *fp++ = 0x89; - *fp++ = 0x7C; - *fp++ = 0x24; - *fp++ = 0x18; - - /* reserve space to save XMM6-XMM15 registers */ - SUBI(&fp, RSP, 168); - - MOVDQA3(&fp, RSP, 0, XMM6); - MOVDQA3(&fp, RSP, 16, XMM7); - MOVDQA3(&fp, RSP, 32, XMM8); - MOVDQA3(&fp, RSP, 48, XMM9); - MOVDQA3(&fp, RSP, 64, XMM10); - MOVDQA3(&fp, RSP, 80, XMM11); - MOVDQA3(&fp, RSP, 96, XMM12); - MOVDQA3(&fp, RSP, 112, XMM13); - MOVDQA3(&fp, RSP, 128, XMM14); - MOVDQA3(&fp, RSP, 144, XMM15); -#else - PUSH(&fp, RBP); - PUSH(&fp, RBX); - PUSH(&fp, R10); - PUSH(&fp, R11); - PUSH(&fp, R12); - PUSH(&fp, R13); - PUSH(&fp, R14); - PUSH(&fp, R15); -#endif + start = generate_prologue(&fp, p); /* assign loop counter register */ - loop_count = p->i0 * 4; + loop_count = 4 * p->i0; #ifdef _M_AMD64 MOVI(&fp, EBX, loop_count); #else @@ -937,66 +680,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N *fp++ = POP_LR(); count++; #else - -#ifdef _M_AMD64 - /* restore nonvolatile registers */ - MOVDQA3(&fp, XMM6, RSP, 0); - MOVDQA3(&fp, XMM7, RSP, 16); - MOVDQA3(&fp, XMM8, RSP, 32); - MOVDQA3(&fp, XMM9, RSP, 48); - MOVDQA3(&fp, XMM10, RSP, 64); - MOVDQA3(&fp, XMM11, RSP, 80); - MOVDQA3(&fp, XMM12, RSP, 96); - MOVDQA3(&fp, XMM13, RSP, 112); - MOVDQA3(&fp, XMM14, RSP, 128); - MOVDQA3(&fp, XMM15, RSP, 144); - - /* restore stack */ - ADDI(&fp, RSP, 168); - - /* restore the last 3 registers from the shadow space */ - - /* mov rbx, [rsp + 8] */ - *fp++ = 0x48; - *fp++ = 0x8B; - *fp++ = 0x5C; - *fp++ = 0x24; - *fp++ = 0x08; - - /* mov rsi, [rsp + 16] */ - *fp++ = 0x48; - *fp++ = 0x8B; - *fp++ = 0x74; - *fp++ = 0x24; - 
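/*
 * The raw byte runs in this prologue/epilogue decode as REX.W moves into
 * the Win64 shadow space: 48 89 5C 24 08 is REX.W (0x48), MOV r/m64,r64
 * (0x89), ModRM 0x5C (mod=01, reg=RBX, rm=100b so a SIB follows), SIB
 * 0x24 (base=RSP, no index), and disp8 0x08, i.e. "mov [rsp + 8], rbx";
 * the epilogue swaps the opcode to 0x8B for the load direction.  A
 * standalone check of that decode, using the exact bytes from the patch:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t save_rbx[] = { 0x48, 0x89, 0x5C, 0x24, 0x08 };

	assert(save_rbx[0] == 0x48);              /* REX.W: 64-bit operand size */
	assert(save_rbx[1] == 0x89);              /* MOV r/m64, r64 (store)     */
	assert((save_rbx[2] >> 6) == 1);          /* mod=01: disp8 follows      */
	assert(((save_rbx[2] >> 3) & 7) == 3);    /* reg=011b: RBX              */
	assert((save_rbx[2] & 7) == 4);           /* rm=100b: SIB byte present  */
	assert(save_rbx[3] == 0x24);              /* SIB: base=RSP, no index    */
	assert(save_rbx[4] == 8);                 /* [rsp + 8]                  */
	return 0;
}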
*fp++ = 0x10; - - /* mov rdi, [rsp + 24] */ - *fp++ = 0x48; - *fp++ = 0x8B; - *fp++ = 0x7C; - *fp++ = 0x24; - *fp++ = 0x18; -#else - POP(&fp, R15); - POP(&fp, R14); - POP(&fp, R13); - POP(&fp, R12); - POP(&fp, R11); - POP(&fp, R10); - POP(&fp, RBX); - POP(&fp, RBP); -#endif - - RET(&fp); - - //uint8_t *pp = func; - //int counter = 0; - //do{ - // printf("%02x ", *pp); - // if(counter++ % 16 == 15) printf("\n"); - //} while(++pp < fp); - - //printf("\n"); + generate_epilogue(&fp); #endif // *fp++ = B(14); count++; diff --git a/src/codegen_arm.h b/src/codegen_arm.h index 2aea43e..3493a11 100644 --- a/src/codegen_arm.h +++ b/src/codegen_arm.h @@ -95,8 +95,131 @@ void MOVI(uint32_t **p, uint8_t dst, uint32_t imm) { uint32_t PUSH_LR() { return 0xe92d4ff0; } //0xe92d4000; } uint32_t POP_LR() { return 0xe8bd8ff0; } //0xe8bd8000; } +static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) +{ + insns_t *x_4_addr; + size_t len; + + x_4_addr = *fp; + +#ifdef HAVE_NEON + len = (char*) neon_x8 - (char*) neon_x4; + memcpy(x_4_addr, neon_x4, len); + + if (sign < 0) { + x_4_addr[26] ^= 0x00200000; + x_4_addr[28] ^= 0x00200000; + x_4_addr[31] ^= 0x00200000; + x_4_addr[32] ^= 0x00200000; + } +#else + len = (char*) vfp_x8 - (char*) vfp_x4; + memcpy(x_4_addr, vfp_x4, len); + + if (sign > 0) { + x_4_addr[36] ^= 0x00000040; + x_4_addr[38] ^= 0x00000040; + x_4_addr[43] ^= 0x00000040; + x_4_addr[44] ^= 0x00000040; + } +#endif + + *fp += len / 4; + return x_4_addr; +} + +static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) +{ + insns_t *x_8_addr; + ptrdiff_t len; + + x_8_addr = *fp; + +#ifdef HAVE_NEON + len = (char*) neon_x8_t - (char*) neon_x8; + memcpy(x_8_addr, neon_x8, len); + + /* + * Changes adds to subtracts and vice versa to allow the computation + * of both the IFFT and FFT + */ + if (sign < 0) { + x_8_addr[31] ^= 0x00200000; + x_8_addr[32] ^= 0x00200000; + x_8_addr[33] ^= 0x00200000; + x_8_addr[34] ^= 0x00200000; + x_8_addr[65] ^= 0x00200000; + x_8_addr[66] ^= 0x00200000; + x_8_addr[70] ^= 0x00200000; + x_8_addr[74] ^= 0x00200000; + x_8_addr[97] ^= 0x00200000; + x_8_addr[98] ^= 0x00200000; + x_8_addr[102] ^= 0x00200000; + x_8_addr[104] ^= 0x00200000; + } + + *fp += len / 4; + + //uint32_t *x_8_t_addr = fp; + //memcpy(fp, neon_x8_t, neon_end - neon_x8_t); + //fp += (neon_end - neon_x8_t) / 4; +#else + len = (char*) vfp_end - (char*) vfp_x8; + memcpy(x_8_addr, vfp_x8, len); + + if (sign > 0) { + x_8_addr[65] ^= 0x00000040; + x_8_addr[66] ^= 0x00000040; + x_8_addr[68] ^= 0x00000040; + x_8_addr[70] ^= 0x00000040; + x_8_addr[103] ^= 0x00000040; + x_8_addr[104] ^= 0x00000040; + x_8_addr[105] ^= 0x00000040; + x_8_addr[108] ^= 0x00000040; + x_8_addr[113] ^= 0x00000040; + x_8_addr[114] ^= 0x00000040; + x_8_addr[117] ^= 0x00000040; + x_8_addr[118] ^= 0x00000040; + } + + *fp += len / 4; +#endif + return x_8_addr; +} + +static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) +{ + insns_t *start = fp; + + *(*fp)++ = PUSH_LR(); + *(*fp)++ = 0xed2d8b10; + + ADDI(fp, 3, 1, 0); + ADDI(fp, 7, 1, N); + ADDI(fp, 5, 1, 2*N); + ADDI(fp, 10, 7, 2*N); + ADDI(fp, 4, 5, 2*N); + ADDI(fp, 8, 10, 2*N); + ADDI(fp, 6, 4, 2*N); + ADDI(fp, 9, 8, 2*N); + + // load offsets into r12 + *(*fp)++ = LDRI(12, 0, ((uint32_t) &p->offsets) - ((uint32_t) p)); + // *(*fp)++ = LDRI(1, 0, 4); // load ws into r1 + ADDI(fp, 1, 0, 0); + + ADDI(fp, 0, 2, 0), // mov out into r0 + *(*fp)++ = LDRI(2, 1, ((uint32_t) &p->ee_ws) - ((uint32_t) p)); + +#ifdef HAVE_NEON + MOVI(fp, 11, p->i0); +#else 
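/*
 * The two literal words opening the ARM prologue are the AAPCS
 * callee-saved sets: 0xe92d4ff0 ("push {r4-r11, lr}", the PUSH_LR()
 * helper) and 0xed2d8b10 ("vpush {d8-d15}").  The low 16 bits of an
 * stmdb/push word are a register bitmap, which a few lines of C can
 * decode; a sketch, not part of the sources:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t push_lr = 0xe92d4ff0u;   /* stmdb sp!, {...} */
	int r;

	printf("push {");
	for (r = 0; r < 16; r++) {
		if (push_lr & (1u << r)) {
			printf(" r%d", r);        /* prints r4..r11 and r14 (lr) */
		}
	}
	printf(" }\n");
	return 0;
}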
+ MOVI(fp, 11, p->i0); +#endif + return start; +} #endif // vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 6690b92..33d2b2c 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -105,6 +105,8 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define XMM14 (XMM_REG | 0xe) #define XMM15 (XMM_REG | 0xf) +#define P(x) (*(*p)++ = x) + static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); @@ -345,4 +347,244 @@ static void SHIFT(uint8_t **p, uint8_t reg, int shift) } } +static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) +{ + if (count >= 9) { + P(0x66); + P(0x0F); + P(0x1F); + P(0x84); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + + if (count > 9) { + ffts_insert_nops(p, count - 9); + } + } else { + switch(count) { + case 0: + break; + case 2: + P(0x66); + /* fall through */ + case 1: + P(0x90); + break; + case 3: + P(0x0F); + P(0x1F); + P(0x00); + break; + case 4: + P(0x0F); + P(0x1F); + P(0x40); + P(0x00); + break; + case 5: + P(0x0F); + P(0x1F); + P(0x44); + P(0x00); + P(0x00); + break; + case 6: + P(0x66); + P(0x0F); + P(0x1F); + P(0x44); + P(0x00); + P(0x00); + break; + case 7: + P(0x0F); + P(0x1F); + P(0x80); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + break; + case 8: + default: + P(0x0F); + P(0x1F); + P(0x84); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + P(0x00); + break; + } + } +} + +static FFTS_INLINE void ffts_align_mem16(uint8_t **p, uint32_t offset) +{ + int r = (16 - (offset & 0xf)) - ((uintptr_t)(*p) & 0xf); + r = (16 + r) & 0xf; + ffts_insert_nops(p, r); +} + +static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) +{ + insns_t *x_4_addr; + size_t len; + + /* align call destination */ + ffts_align_mem16(fp, 0); + x_4_addr = *fp; + + /* copy function */ + assert((char*) x8_soft > (char*) x4); + len = (char*) x8_soft - (char*) x4; + memcpy(*fp, x4, len); + *fp += len; + + return x_4_addr; +} + +static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) +{ + insns_t *x_8_addr; + size_t len; + + /* align call destination */ + ffts_align_mem16(fp, 0); + x_8_addr = *fp; + + /* align loop/jump destination */ +#ifdef _M_AMD64 + ffts_align_mem16(fp, 6); +#else + ffts_align_mem16(fp, 5); +#endif + + /* copy function */ + assert((char*) x8_soft_end > (char*) x8_soft); + len = (char*) x8_soft_end - (char*) x8_soft; + memcpy(*fp, x8_soft, len); + *fp += len; + + return x_8_addr; +} + +static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) +{ + insns_t *start; + + /* align call destination */ + ffts_align_mem16(fp, 0); + start = *fp; + + /* save nonvolatile registers */ +#ifdef _M_AMD64 + /* use the shadow space to save first 3 registers */ + + /* mov [rsp + 8], rbx */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x89; + *(*fp)++ = 0x5C; + *(*fp)++ = 0x24; + *(*fp)++ = 0x08; + + /* mov [rsp + 16], rsi */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x89; + *(*fp)++ = 0x74; + *(*fp)++ = 0x24; + *(*fp)++ = 0x10; + + /* mov [rsp + 24], rdi */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x89; + *(*fp)++ = 0x7C; + *(*fp)++ = 0x24; + *(*fp)++ = 0x18; + + /* reserve space to save XMM6-XMM15 registers */ + SUBI(fp, RSP, 168); + + MOVDQA3(fp, RSP, 0, XMM6); + MOVDQA3(fp, RSP, 16, XMM7); + MOVDQA3(fp, RSP, 32, XMM8); + MOVDQA3(fp, RSP, 48, XMM9); + MOVDQA3(fp, RSP, 64, XMM10); + MOVDQA3(fp, RSP, 80, XMM11); + MOVDQA3(fp, RSP, 96, XMM12); + MOVDQA3(fp, RSP, 112, XMM13); + MOVDQA3(fp, RSP, 128, XMM14); + MOVDQA3(fp, RSP, 144, XMM15); +#else + PUSH(fp, RBP); + PUSH(fp, 
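/*
 * ffts_align_mem16(p, offset) pads with NOPs until (*p + offset) is
 * 16-byte aligned; the callers pass the number of instruction bytes that
 * will precede the next loop or jump target, so it is the target, not the
 * current emit position, that lands on a 16-byte boundary.  The
 * arithmetic can be checked exhaustively:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t addr;
	uint32_t offset;

	for (addr = 0; addr < 64; addr++) {
		for (offset = 0; offset < 16; offset++) {
			int r = (16 - (int) (offset & 0xf)) - (int) (addr & 0xf);

			r = (16 + r) & 0xf;   /* NOP bytes ffts_insert_nops will emit */
			assert((addr + (uintptr_t) r + offset) % 16 == 0);
		}
	}
	return 0;
}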
RBX); + PUSH(fp, R10); + PUSH(fp, R11); + PUSH(fp, R12); + PUSH(fp, R13); + PUSH(fp, R14); + PUSH(fp, R15); +#endif + + return start; +} + +static FFTS_INLINE void generate_epilogue(insns_t **fp) +{ +#ifdef _M_AMD64 + /* restore nonvolatile registers */ + MOVDQA3(fp, XMM6, RSP, 0); + MOVDQA3(fp, XMM7, RSP, 16); + MOVDQA3(fp, XMM8, RSP, 32); + MOVDQA3(fp, XMM9, RSP, 48); + MOVDQA3(fp, XMM10, RSP, 64); + MOVDQA3(fp, XMM11, RSP, 80); + MOVDQA3(fp, XMM12, RSP, 96); + MOVDQA3(fp, XMM13, RSP, 112); + MOVDQA3(fp, XMM14, RSP, 128); + MOVDQA3(fp, XMM15, RSP, 144); + + /* restore stack */ + ADDI(fp, RSP, 168); + + /* restore the last 3 registers from the shadow space */ + + /* mov rbx, [rsp + 8] */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x8B; + *(*fp)++ = 0x5C; + *(*fp)++ = 0x24; + *(*fp)++ = 0x08; + + /* mov rsi, [rsp + 16] */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x8B; + *(*fp)++ = 0x74; + *(*fp)++ = 0x24; + *(*fp)++ = 0x10; + + /* mov rdi, [rsp + 24] */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x8B; + *(*fp)++ = 0x7C; + *(*fp)++ = 0x24; + *(*fp)++ = 0x18; +#else + POP(fp, R15); + POP(fp, R14); + POP(fp, R13); + POP(fp, R12); + POP(fp, R11); + POP(fp, R10); + POP(fp, RBX); + POP(fp, RBP); +#endif + + RET(fp); +} + #endif /* FFTS_CODEGEN_SSE_H */ \ No newline at end of file -- cgit v1.1 From 311c05f05c4e9bee5f4731c6a3cd6b8122fc14b4 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 4 Nov 2014 15:04:06 +0200 Subject: Replace _M_AMD64 with _M_X64 as it is equal and "neutral" --- src/codegen.c | 19 ++++++++++--------- src/codegen_sse.h | 6 +++--- src/ffts.h | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 36fdf8d..880f598 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -155,7 +155,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* assign loop counter register */ loop_count = 4 * p->i0; -#ifdef _M_AMD64 +#ifdef _M_X64 MOVI(&fp, EBX, loop_count); #else MOVI(&fp, ECX, loop_count); @@ -210,7 +210,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += len; /* align loop/jump destination */ -#ifdef _M_AMD64 +#ifdef _M_X64 ffts_align_mem16(&fp, 8); #else ffts_align_mem16(&fp, 9); @@ -233,7 +233,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N loop_count += 4 * p->i1; /* align loop/jump destination */ -#ifdef _M_AMD64 +#ifdef _M_X64 MOVI(&fp, EBX, loop_count); ffts_align_mem16(&fp, 3); #else @@ -286,7 +286,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N loop_count += 4 * p->i1; /* align loop/jump destination */ -#ifdef _M_AMD64 +#ifdef _M_X64 MOVI(&fp, EBX, loop_count); ffts_align_mem16(&fp, 3); #else @@ -313,7 +313,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N loop_count += 4 * p->i1; /* align loop/jump destination */ -#ifdef _M_AMD64 +#ifdef _M_X64 MOVI(&fp, EBX, loop_count); ffts_align_mem16(&fp, 8); #else @@ -337,12 +337,13 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N memcpy(fp, x_init, len); fp += len; + /* generate subtransform calls */ count = 2; while (pps[0]) { size_t ws_is; if (!pN) { -#ifdef _M_AMD64 +#ifdef _M_X64 MOVI(&fp, EBX, pps[0]); #else MOVI(&fp, ECX, pps[0] / 4); @@ -350,7 +351,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N } else { int offset = (4 * pps[1]) - pAddr; if (offset) { -#ifdef _M_AMD64 +#ifdef _M_X64 ADDI(&fp, R8, offset); #else ADDI(&fp, RDX, offset); @@ -360,7 +361,7 @@ 
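/*
 * ffts_ctzl (count trailing zeros) is what lets the generator turn ratios
 * of power-of-two block sizes into shift counts for the SHIFT(..., factor)
 * calls in the subtransform loop.  On MSVC it maps to _BitScanForward64,
 * on GCC to __builtin_ctzl; a portable fallback with the same contract
 * for nonzero inputs would be:
 */
#include <assert.h>
#include <stddef.h>

static unsigned long ctzl_portable(size_t n)
{
	unsigned long c = 0;

	assert(n != 0);   /* ctz(0) is undefined, matching the intrinsics */
	while ((n & 1) == 0) {
		n >>= 1;
		c++;
	}
	return c;
}

int main(void)
{
	assert(ctzl_portable(64) - ctzl_portable(16) == 2);   /* 64 = 16 << 2 */
	return 0;
}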
transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if (pps[0] > leaf_N && pps[0] - pN) { int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN); -#ifdef _M_AMD64 +#ifdef _M_X64 SHIFT(&fp, EBX, factor); #else SHIFT(&fp, ECX, factor); @@ -372,7 +373,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if (ws_is != pLUT) { int offset = (int) (ws_is - pLUT); -#ifdef _M_AMD64 +#ifdef _M_X64 ADDI(&fp, RDI, offset); #else ADDI(&fp, R8, offset); diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 33d2b2c..d65af9a 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -458,7 +458,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x_8_addr = *fp; /* align loop/jump destination */ -#ifdef _M_AMD64 +#ifdef _M_X64 ffts_align_mem16(fp, 6); #else ffts_align_mem16(fp, 5); @@ -482,7 +482,7 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) start = *fp; /* save nonvolatile registers */ -#ifdef _M_AMD64 +#ifdef _M_X64 /* use the shadow space to save first 3 registers */ /* mov [rsp + 8], rbx */ @@ -535,7 +535,7 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) static FFTS_INLINE void generate_epilogue(insns_t **fp) { -#ifdef _M_AMD64 +#ifdef _M_X64 /* restore nonvolatile registers */ MOVDQA3(fp, XMM6, RSP, 0); MOVDQA3(fp, XMM7, RSP, 16); diff --git a/src/ffts.h b/src/ffts.h index ca2951a..156a3b3 100644 --- a/src/ffts.h +++ b/src/ffts.h @@ -187,7 +187,7 @@ static FFTS_INLINE void ffts_aligned_free(void *p) #define ffts_ctzl __builtin_ctzl #elif defined(_MSC_VER) #include -#ifdef _M_AMD64 +#ifdef _M_X64 #pragma intrinsic(_BitScanForward64) static __inline unsigned long ffts_ctzl(size_t N) { -- cgit v1.1 From 160d9c835c114fea9f03cff9b19979f1e4c1824b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 4 Nov 2014 18:48:23 +0200 Subject: Generate leaf_ee_init and x_init instead of copying --- src/codegen.c | 28 ++++++-- src/codegen_sse.h | 207 +++++++++++++++++++++++++++++++++++++++++------------- src/sse_win64.s | 8 +-- 3 files changed, 188 insertions(+), 55 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 880f598..9d95519 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -203,16 +203,27 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N #else //fprintf(stderr, "Body start address = %016p\n", start); +#ifdef _M_X64 + /* generate function */ + + /* clear */ + XOR2(&fp, EAX, EAX); + + /* set "pointer" to offsets */ + MOV(&fp, RDI, RCX, 0, 0); + + /* set "pointer" to constants */ + MOV(&fp, RSI, RCX, 0xE0, 0); + + /* align loop/jump destination */ + ffts_align_mem16(&fp, 8); +#else /* copy function */ assert((char*) leaf_ee > (char*) leaf_ee_init); len = (char*) leaf_ee - (char*) leaf_ee_init; memcpy(fp, leaf_ee_init, (size_t) len); fp += len; - /* align loop/jump destination */ -#ifdef _M_X64 - ffts_align_mem16(&fp, 8); -#else ffts_align_mem16(&fp, 9); #endif @@ -332,10 +343,19 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += len; } +#ifdef _M_X64 + /* generate function */ + MOVAPS2(&fp, XMM3, RSI); + + /* set "pointer" to twiddle factors */ + MOV(&fp, RDI, RCX, 0x20, 0); +#else + /* copy function */ assert((char*) x4 > (char*) x_init); len = (char*) x4 - (char*) x_init; memcpy(fp, x_init, len); fp += len; +#endif /* generate subtransform calls */ count = 2; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index d65af9a..f1b1500 100644 --- a/src/codegen_sse.h +++ 
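/*
 * The Win64 branch above now generates leaf_ee_init directly: xor eax,eax
 * zeroes the loop counter, then two loads through rcx (which holds the
 * ffts_plan_t pointer, per the register comments in sse_win64.s) pick up
 * the offsets and constants pointers at byte offsets 0x00 and 0xE0.  In C
 * terms it amounts to the following; the offsets come from the MOV calls
 * in the patch, but the names and the fake plan are illustrative only:
 */
#include <stdint.h>

static void leaf_ee_init_in_c(const char *plan,           /* rcx */
                              uint64_t *loop_counter,      /* rax */
                              const void **offsets,        /* rdi */
                              const void **constants)      /* rsi */
{
	*loop_counter = 0;                                     /* XOR2(EAX, EAX)      */
	*offsets   = *(const void * const *) (plan + 0x00);    /* MOV RDI, [RCX]      */
	*constants = *(const void * const *) (plan + 0xE0);    /* MOV RSI, [RCX+0xE0] */
}

int main(void)
{
	uint64_t fake_plan[0x20] = { 0 };   /* zeroed, aligned stand-in */
	uint64_t counter = 1;
	const void *offs, *consts;

	leaf_ee_init_in_c((const char *) fake_plan, &counter, &offs, &consts);
	return counter == 0 ? 0 : 1;
}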
b/src/codegen_sse.h @@ -130,6 +130,15 @@ static void IMM32(uint8_t **p, int32_t imm) } } +static void IMM64(uint8_t **p, int64_t imm) +{ + int i; + + for (i = 0; i < 8; i++) { + *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); + } +} + static void IMM32_NI(uint8_t *p, int32_t imm) { int i; @@ -151,14 +160,29 @@ static int32_t READ_IMM32(uint8_t *p) return rval; } -static void MOVI(uint8_t **p, uint8_t dst, uint32_t imm) +static void MOVI(uint8_t **p, uint8_t dst, uint64_t imm) { - if (dst >= 8) { - *(*p)++ = 0x41; + if (dst >= 8 || imm > UINT32_MAX) { + uint8_t val = 0x40; + + if (dst >= 8) { + val |= 1; + } + + if (imm > UINT32_MAX) { + val |= 8; + } + + *(*p)++ = val; } *(*p)++ = 0xb8 | (dst & 0x7); - IMM32(p, imm); + + if (imm > UINT32_MAX) { + IMM64(p, imm); + } else { + IMM32(p, imm); + } } static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) @@ -174,6 +198,78 @@ static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) } } +static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; + + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + *(*p)++ = 0x0F; + + if (is_store) { + *(*p)++ = 0x29; + } else { + *(*p)++ = 0x28; + } + + r = r1 | (r2 << 3); + + if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { + assert(disp == 0); + *(*p)++ = 0xC0 | r; + } else { + assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); + + if (disp == 0 && r1 != 5) { + *(*p)++ = r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + } else { + if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + + IMM32(p, disp); + } + } + } +} + +static FFTS_INLINE void MOVAPS2(uint8_t **p, uint8_t reg1, uint8_t reg2) +{ + if (reg1 & XMM_REG) { + MOVAPS(p, reg2, reg1, 0, 0); + } else { + MOVAPS(p, reg1, reg2, 0, 1); + } +} + +static FFTS_INLINE void MOVAPS3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) +{ + if (reg1 & XMM_REG) { + MOVAPS(p, (uint8_t) op2, reg1, op3, 0); + } else { + MOVAPS(p, reg1, (uint8_t) op3, op2, 1); + } +} + static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { uint8_t r1 = (reg1 & 7); @@ -347,6 +443,58 @@ static void SHIFT(uint8_t **p, uint8_t reg, int shift) } } +static FFTS_INLINE void MOV(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } + + if (is_store) { + *(*p)++ = 0x89; + } else { + *(*p)++ = 0x8B; + } + + if (disp == 0) { + *(*p)++ = r2 | (r1 << 3); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + } else if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r2 | (r1 << 3); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + + IMM32(p, disp); + } +} + +static FFTS_INLINE void XOR2(uint8_t **p, uint8_t reg1, uint8_t reg2) +{ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + *(*p)++ = 0x31; + *(*p)++ = 0xC0 | (reg2 & 7) | ((reg1 & 7) << 3); +} + static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) { if (count >= 9) { @@ -484,31 +632,14 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) /* 
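/*
 * MOVI now handles 64-bit immediates: "mov r64, imm64" is REX.W (0x48,
 * plus REX.B for r8-r15) + (B8 | reg) + eight little-endian immediate
 * bytes, versus the 5-byte "mov r32, imm32" form, which zero-extends on
 * x86-64 and is kept whenever the value fits.  One caution on the IMM64
 * helper as committed: "(imm & (0xff << (8 * i)))" shifts the int
 * constant 0xff, which misbehaves once the shift reaches the sign bit or
 * the width of int; shifting the value right instead sidesteps that, as
 * in this sketch:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[16], *p = buf;
	uint64_t imm = 0x1122334455667788ull;
	int i;

	*p++ = 0x48;                             /* REX.W */
	*p++ = 0xB8 | 0;                         /* mov rax, imm64 */
	for (i = 0; i < 8; i++) {
		*p++ = (uint8_t) (imm >> (8 * i));   /* little-endian imm64 */
	}

	assert(p - buf == 10 && buf[2] == 0x88); /* lowest byte first */
	return 0;
}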
save nonvolatile registers */ #ifdef _M_X64 /* use the shadow space to save first 3 registers */ + MOV(fp, RBX, RSP, 8, 1); + MOV(fp, RSI, RSP, 16, 1); + MOV(fp, RDI, RSP, 24, 1); - /* mov [rsp + 8], rbx */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x89; - *(*fp)++ = 0x5C; - *(*fp)++ = 0x24; - *(*fp)++ = 0x08; - - /* mov [rsp + 16], rsi */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x89; - *(*fp)++ = 0x74; - *(*fp)++ = 0x24; - *(*fp)++ = 0x10; - - /* mov [rsp + 24], rdi */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x89; - *(*fp)++ = 0x7C; - *(*fp)++ = 0x24; - *(*fp)++ = 0x18; - - /* reserve space to save XMM6-XMM15 registers */ + /* reserve space.. */ SUBI(fp, RSP, 168); + /* to save XMM6-XMM15 registers */ MOVDQA3(fp, RSP, 0, XMM6); MOVDQA3(fp, RSP, 16, XMM7); MOVDQA3(fp, RSP, 32, XMM8); @@ -552,27 +683,9 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) ADDI(fp, RSP, 168); /* restore the last 3 registers from the shadow space */ - - /* mov rbx, [rsp + 8] */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x8B; - *(*fp)++ = 0x5C; - *(*fp)++ = 0x24; - *(*fp)++ = 0x08; - - /* mov rsi, [rsp + 16] */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x8B; - *(*fp)++ = 0x74; - *(*fp)++ = 0x24; - *(*fp)++ = 0x10; - - /* mov rdi, [rsp + 24] */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x8B; - *(*fp)++ = 0x7C; - *(*fp)++ = 0x24; - *(*fp)++ = 0x18; + MOV(fp, RBX, RSP, 8, 0); + MOV(fp, RSI, RSP, 16, 0); + MOV(fp, RDI, RSP, 24, 0); #else POP(fp, R15); POP(fp, R14); diff --git a/src/sse_win64.s b/src/sse_win64.s index 2aa76cd..c92358f 100644 --- a/src/sse_win64.s +++ b/src/sse_win64.s @@ -53,13 +53,13 @@ _leaf_ee_init: leaf_ee_init: #endif +# rax is loop counter (init to 0) # rcx is a pointer to the ffts_plan -# eax is loop counter (init to 0) -# rbx is loop max count # rdx is 'in' base pointer -# r8 is 'out' base pointer -# rdi is offsets pointer +# rbx is loop max count # rsi is constants pointer +# rdi is offsets pointer +# r8 is 'out' base pointer # scratch: rax r10 r11 xorl %eax, %eax -- cgit v1.1 From b4efe4fc9fa2485679c4f6e4a9963c99d791aa0b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 5 Nov 2014 11:43:08 +0200 Subject: Reorder functions to alphabetical order --- src/codegen_sse.h | 328 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 206 insertions(+), 122 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index f1b1500..abb5008 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -107,6 +107,52 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define P(x) (*(*p)++ = x) +/* forward declarations */ +static void IMM8(uint8_t **p, int32_t imm); +static void IMM32(uint8_t **p, int32_t imm); + +static void ADDI(uint8_t **p, uint8_t dst, int32_t imm) +{ + if (dst >= 8) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } + + if (imm > 127 || imm <= -128) { + *(*p)++ = 0x81; + } else { + *(*p)++ = 0x83; + } + + *(*p)++ = 0xc0 | (dst & 0x7); + + if (imm > 127 || imm <= -128) { + IMM32(p, imm); + } else { + IMM8(p, imm); + } +} + +static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) +{ + if (disp == 0) { + *(*p)++ = (rm & 7) | ((reg & 7) << 3); + } else if (disp <= 127 || disp >= -128) { + *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3); + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3); + IMM32(p, disp); + } +} + +static void CALL(uint8_t **p, uint8_t *func) +{ + *(*p)++ = 0xe8; + IMM32(p, func - *p - 4); +} + static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); @@ -148,53 +194,52 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static int32_t 
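/*
 * Two details worth pausing on in the reordered helpers: CALL encodes E8
 * plus a rel32 measured from the end of the 5-byte instruction, hence
 * "func - *p - 4" once the opcode byte is out; and ADDRMODE's range test
 * "disp <= 127 || disp >= -128" is true for every disp, which reads like
 * a typo for &&, harmless only while callers keep displacements small.
 * The rel32 arithmetic, checked standalone (the destination is a
 * hypothetical address inside a local buffer):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t code[32];
	uint8_t *target = code + 21;        /* hypothetical call destination */
	uint8_t *p = code;
	int32_t rel;

	*p++ = 0xE8;                        /* call rel32 */
	rel = (int32_t) (target - p - 4);   /* relative to the next insn */

	/* after the four immediate bytes, execution resumes at p + 4,
	 * and p + 4 + rel is exactly the target: */
	assert(p + 4 + rel == target);
	return 0;
}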
READ_IMM32(uint8_t *p) +static void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) { - int32_t rval = 0; - int i; - - for (i = 0; i < 4; i++) { - rval |= *(p+i) << (8 * i); - } - - return rval; + *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); + *(*p)++ = 0x8d; + ADDRMODE(p, dst, base, disp); } -static void MOVI(uint8_t **p, uint8_t dst, uint64_t imm) +static FFTS_INLINE void MOV(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { - if (dst >= 8 || imm > UINT32_MAX) { - uint8_t val = 0x40; - - if (dst >= 8) { - val |= 1; - } - - if (imm > UINT32_MAX) { - val |= 8; - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); - *(*p)++ = val; - } + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } - *(*p)++ = 0xb8 | (dst & 0x7); - - if (imm > UINT32_MAX) { - IMM64(p, imm); + if (is_store) { + *(*p)++ = 0x89; } else { - IMM32(p, imm); + *(*p)++ = 0x8B; } -} -static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) -{ - if (disp == 0) { - *(*p)++ = (rm & 7) | ((reg & 7) << 3); - } else if (disp <= 127 || disp >= -128) { - *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3); - IMM8(p, disp); + if (disp == 0) { + *(*p)++ = r2 | (r1 << 3); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + } else if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r2 | (r1 << 3); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + + IMM8(p, disp); } else { - *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3); - IMM32(p, disp); + *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); + + if (r2 == 4) { + *(*p)++ = 0x24; + } + + IMM32(p, disp); } } @@ -204,12 +249,15 @@ static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t uint8_t r2 = (reg2 & 7); uint8_t r; + /* REX prefix */ if ((reg1 & 8) || (reg2 & 8)) { *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); } + /* esacape opcode */ *(*p)++ = 0x0F; + /* opcode */ if (is_store) { *(*p)++ = 0x29; } else { @@ -276,14 +324,18 @@ static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t uint8_t r2 = (reg2 & 7); uint8_t r; + /* mandatory prefix */ *(*p)++ = 0x66; + /* REX prefix */ if ((reg1 & 8) || (reg2 & 8)) { *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); } + /* esacape opcode */ *(*p)++ = 0x0F; + /* opcode */ if (is_store) { *(*p)++ = 0x7F; } else { @@ -344,86 +396,137 @@ static FFTS_INLINE void MOVDQA3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t } } -static void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) +static void MOVI(uint8_t **p, uint8_t dst, uint64_t imm) { - *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); - *(*p)++ = 0x8d; - ADDRMODE(p, dst, base, disp); -} + /* REX prefix */ + if (dst >= 8 || imm > UINT32_MAX) { + uint8_t val = 0x40; + + if (dst >= 8) { + val |= 1; + } -static void RET(uint8_t **p) -{ - *(*p)++ = 0xc3; + if (imm > UINT32_MAX) { + val |= 8; + } + + *(*p)++ = val; + } + + /* opcode */ + *(*p)++ = 0xb8 | (dst & 0x7); + + if (imm > UINT32_MAX) { + IMM64(p, imm); + } else { + IMM32(p, imm); + } } -static void ADDI(uint8_t **p, uint8_t dst, int32_t imm) +static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { - if (dst >= 8) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; - if (imm > 127 || imm <= -128) { - *(*p)++ = 0x81; - } else { - *(*p)++ = 0x83; - } + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } - *(*p)++ = 0xc0 | (dst 
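/*
 * The recurring "0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1)" builds a
 * REX prefix, layout 0100WRXB: B (bit 0) extends the ModRM r/m register,
 * R (bit 2) extends the ModRM reg field, W (bit 3) selects 64-bit operand
 * size, which is why the integer MOV uses 0x48/0x49 while the SSE moves
 * emit 0x40/0x41/0x44/0x45 only when xmm8-xmm15 are involved.  ("esacape
 * opcode" in the comments is a typo for the 0x0F escape opcode.)  The
 * composition, checked against byte sequences that appear later in this
 * series:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t rex(int w, uint8_t reg, uint8_t rm)
{
	return (uint8_t) (0x40 | ((w & 1) << 3)
	                       | (((reg >> 3) & 1) << 2)
	                       | ((rm >> 3) & 1));
}

int main(void)
{
	assert(rex(1, 3, 4)  == 0x48);   /* mov [rsp + 8], rbx */
	assert(rex(0, 9, 0)  == 0x44);   /* movaps xmm9, [rax] */
	assert(rex(0, 11, 9) == 0x45);   /* movaps xmm11, xmm9 */
	return 0;
}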
& 0x7); + /* esacape opcode */ + *(*p)++ = 0x0F; + + /* opcode */ + *(*p)++ = 0x59; + + r = r1 | (r2 << 3); - if (imm > 127 || imm <= -128) { - IMM32(p, imm); - } else { - IMM8(p, imm); - } -} + if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { + assert(disp == 0); + *(*p)++ = 0xC0 | r; + } else { + assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); -static void SUBI(uint8_t **p, uint8_t dst, int32_t imm) -{ - if (dst >= 8) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } + if (disp == 0 && r1 != 5) { + *(*p)++ = r; - if (imm > 127 || imm <= -128) { - *(*p)++ = 0x81; - } else { - *(*p)++ = 0x83; - } + if (r1 == 4) { + *(*p)++ = 0x24; + } + } else { + if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r; - *(*p)++ = 0xe8 | (dst & 0x7); + if (r1 == 4) { + *(*p)++ = 0x24; + } - if (imm > 127 || imm <= -128) { - IMM32(p, imm); - } else { - IMM8(p, imm); - } + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r; + + if (r1 == 4) { + *(*p)++ = 0x24; + } + + IMM32(p, disp); + } + } + } } -static void CALL(uint8_t **p, uint8_t *func) +static FFTS_INLINE void MULPS2(uint8_t **p, uint8_t reg1, uint8_t reg2) { - *(*p)++ = 0xe8; - IMM32(p, func - *p - 4); + if (reg1 & XMM_REG) { + MULPS(p, reg2, reg1, 0, 0); + } else { + MULPS(p, reg1, reg2, 0, 1); + } } -static void PUSH(uint8_t **p, uint8_t reg) +static FFTS_INLINE void MULPS3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) +{ + if (reg1 & XMM_REG) { + MULPS(p, (uint8_t) op2, reg1, op3, 0); + } else { + MULPS(p, reg1, (uint8_t) op3, op2, 1); + } +} + +static void POP(uint8_t **p, uint8_t reg) { if (reg >= 8) { *(*p)++ = 0x41; } - *(*p)++ = 0x50 | (reg & 7); + *(*p)++ = 0x58 | (reg & 7); } -static void POP(uint8_t **p, uint8_t reg) +static void PUSH(uint8_t **p, uint8_t reg) { if (reg >= 8) { *(*p)++ = 0x41; } - *(*p)++ = 0x58 | (reg & 7); + *(*p)++ = 0x50 | (reg & 7); +} + +static int32_t READ_IMM32(uint8_t *p) +{ + int32_t rval = 0; + int i; + + for (i = 0; i < 4; i++) { + rval |= *(p+i) << (8 * i); + } + + return rval; +} + +static void RET(uint8_t **p) +{ + *(*p)++ = 0xc3; } static void SHIFT(uint8_t **p, uint8_t reg, int shift) @@ -443,45 +546,26 @@ static void SHIFT(uint8_t **p, uint8_t reg, int shift) } } -static FFTS_INLINE void MOV(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) +static void SUBI(uint8_t **p, uint8_t dst, int32_t imm) { - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } - - if (is_store) { - *(*p)++ = 0x89; - } else { - *(*p)++ = 0x8B; - } - - if (disp == 0) { - *(*p)++ = r2 | (r1 << 3); - - if (r2 == 4) { - *(*p)++ = 0x24; - } - } else if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r2 | (r1 << 3); - - if (r2 == 4) { - *(*p)++ = 0x24; - } + if (dst >= 8) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } - IMM8(p, disp); + if (imm > 127 || imm <= -128) { + *(*p)++ = 0x81; } else { - *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); + *(*p)++ = 0x83; + } - if (r2 == 4) { - *(*p)++ = 0x24; - } + *(*p)++ = 0xe8 | (dst & 0x7); - IMM32(p, disp); + if (imm > 127 || imm <= -128) { + IMM32(p, imm); + } else { + IMM8(p, imm); } } -- cgit v1.1 From a0db4af6fe8f68a62cbf993871137d4cd341dfc5 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 5 Nov 2014 11:43:56 +0200 Subject: Import Sequitur algorithm from SFFT --- src/sequitur.h | 448 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 448 insertions(+) create mode 100644 src/sequitur.h diff --git a/src/sequitur.h b/src/sequitur.h new file mode 100644 
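/*
 * IMM32 and READ_IMM32 are little-endian inverses; READ_IMM32 apparently
 * exists so already-emitted displacements can be read back and patched
 * (alongside the non-incrementing IMM32_NI).  A quick round-trip check of
 * the byte order, accumulating unsigned to avoid shifting into the sign
 * bit:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[4];
	int32_t v = (int32_t) 0xFEDCBA98;   /* arbitrary test pattern */
	uint32_t acc = 0;
	int i;

	for (i = 0; i < 4; i++) {           /* IMM32: lowest byte first */
		buf[i] = (uint8_t) ((uint32_t) v >> (8 * i));
	}
	for (i = 0; i < 4; i++) {           /* READ_IMM32 */
		acc |= (uint32_t) buf[i] << (8 * i);
	}

	assert((int32_t) acc == v);
	return 0;
}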
index 0000000..7429115 --- /dev/null +++ b/src/sequitur.h @@ -0,0 +1,448 @@ +/* + + This file is part of FFTS -- The Fastest Fourier Transform in the South + + Copyright (c) 2012, Anthony M. Blake + Copyright (c) 2012, The University of Waikato + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +typedef struct _sym_t { + int c; + struct _sym_t *pPrev, *pNext; + struct _seq_rule_t *r; + int offset; +} sym_t; + +typedef struct _seq_rule_t { + int c; + sym_t *ss; + struct _seq_rule_t *pPrev, *pNext; + int count; + int length; +} seq_rule_t; + +void sym_tail_insert(sym_t **ss, sym_t *s) +{ + if (!*ss) { + *ss = s; + s->pPrev = s->pNext = NULL; + } else { + while (*ss) { + s->pPrev = *ss; + ss = &(*ss)->pNext; + } + + *ss = s; + } +} + +sym_t* sym_init(int c) +{ + sym_t *s; + + s = (sym_t*) malloc(sizeof(*s)); + if (!s) { + return NULL; + } + + s->c = c; + s->pPrev = s->pNext = NULL; + s->r = NULL; + + return s; +} + +sym_t* sym_init_from_sym(sym_t *s2) +{ + sym_t *s; + + s = (sym_t*) malloc(sizeof(*s)); + if (!s) { + return NULL; + } + + s->c = s2->c; + s->pPrev = s->pNext = NULL; + s->r = s2->r; + s->offset = s2->offset; + + return s; +} + +seq_rule_t* seq_init_rule(int c) +{ + seq_rule_t *G; + + G = (seq_rule_t *)malloc(sizeof(*G)); + if (!G) { + return NULL; + } + + G->c = c; + G->count = 2; + G->ss = NULL; + G->pPrev = NULL; + G->pNext = NULL; + + return G; +} + +seq_rule_t* seq_grammer_insert_new_rule(seq_rule_t *G, char r, sym_t *a, sym_t *b) +{ + sym_t *sa, *sb; + + while (G->pNext) { + G = G->pNext; + } + + G->pNext = seq_init_rule(r); + if (!G->pNext) { + return NULL; + } + + sa = sym_init_from_sym(a); + if (!sa) { + goto cleanup_pnext; + } + + sb = sym_init_from_sym(b); + if (!sb) { + goto cleanup_sa; + } + + sb->offset = sb->offset - sa->offset; + sa->offset = 0; + sym_tail_insert(&G->pNext->ss, sa); + sym_tail_insert(&G->pNext->ss, sb); + return G->pNext; + +cleanup_sa: + free(sa); + +cleanup_pnext: + free(G->pNext); + G->pNext = NULL; + + return NULL; +} + +sym_t* sym_match_digram(sym_t *s, sym_t *term, sym_t *a, sym_t *b) +{ + while (s != term) { + if (s->c == a->c && s->pNext->c == 
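/*
 * sequitur.h is the Sequitur grammar-induction algorithm (Nevill-Manning
 * and Witten): scan a sequence once, and whenever a digram, an adjacent
 * pair of symbols, repeats, replace both occurrences with a rule, then
 * inline any rule used only once.  This variant additionally keys digrams
 * on the offset delta between the two symbols, since the symbols here
 * stand for blocks of generated code at particular addresses.  A toy
 * illustration of just the digram-repeat test, on plain characters with
 * no offsets:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *s = "abcabc";
	size_t i, j, n = strlen(s);

	for (i = 0; i + 1 < n; i++) {
		for (j = i + 1; j + 1 < n; j++) {
			if (s[i] == s[j] && s[i + 1] == s[j + 1]) {
				/* Sequitur would fold this repeat into a rule on the fly */
				printf("digram \"%c%c\" repeats at %zu and %zu\n",
				       s[i], s[i + 1], i, j);
			}
		}
	}
	return 0;
}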
b->c && + s->pNext->offset - s->offset == b->offset-a->offset) { + return s; + } + + s = s->pNext; + } + + return NULL; +} + +seq_rule_t* seq_match_digram(seq_rule_t *R, sym_t *a, sym_t *b) +{ + while (R) { + if (R->ss->c == a->c && R->ss->pNext->c == b->c && + R->ss->pNext->offset - R->ss->offset == b->offset - a->offset) { + return R; + } + + R = R->pNext; + } + + return NULL; +} + +sym_t* sym_tail(sym_t *s) +{ + while (s->pNext) { + s = s->pNext; + } + + return s; +} + +int sym_count(sym_t *s) +{ + int count = 0; + + while (s) { + count++; + s = s->pNext; + } + + return count; +} + +sym_t* sym_copylist(sym_t *s) +{ + sym_t *head = NULL; + sym_t *prev = head; + + while (s) { + sym_t *copy = sym_init_from_sym(s); + if (!copy) { + return NULL; + } + + copy->pPrev = prev; + + if (prev) { + prev->pNext = copy; + } + + if (!head) { + head = copy; + } + + prev = copy; + s = s->pNext; + } + + return head; +} + +void seq_enforce_uniqueness(seq_rule_t *G) +{ + seq_rule_t *R = G;//->pNext; + seq_rule_t **ppr = &G->pNext; + + while (R) { + if (R == G || R->count > 1) { + sym_t *s = R->ss; + sym_t **pp = &R->ss; + + while (s) { + if (s->r && s->r->count == 1) { + sym_t *temp_itr; + + *pp = s->r->ss; + + temp_itr = s->r->ss; + while (temp_itr) { + temp_itr->offset += s->offset; + temp_itr = temp_itr->pNext; + } + + s->r->ss->pPrev = s->pPrev; + if (s->pNext) { + s->pNext->pPrev = sym_tail(s->r->ss); + } + + sym_tail(s->r->ss)->pNext = s->pNext; + s = s->r->ss; + continue; + } + + pp = &s->pNext; + s = s->pNext; + } + + ppr = &R->pNext; + } else { + *ppr = R->pNext; + } + + R = R->pNext; + } +} + +void seq_merge_small_rules(seq_rule_t *G, int thresh) +{ + seq_rule_t *R = G; + + while (R) { + if (sym_count(R->ss) <= thresh) { + //printf("count %d > %d for %d\n", sym_count(R->ss), thresh, R->c); + sym_t *s = R->ss; + sym_t **pp = &R->ss; + + while (s) { + if (s->r) { + sym_t *copylist; + sym_t *copylist_itr; + + s->r->count--; + + copylist = sym_copylist(s->r->ss); + if (!copylist) { + return; + } + + copylist_itr = copylist; + while (copylist_itr) { + copylist_itr->offset += s->offset; + copylist_itr = copylist_itr->pNext; + } + + *pp = copylist; + copylist->pPrev = s->pPrev; + if (s->pNext) { + s->pNext->pPrev = sym_tail(copylist); + } + + sym_tail(copylist)->pNext = s->pNext; + pp = &(sym_tail(copylist)->pNext); + s = sym_tail(copylist)->pNext; + continue; + } + + pp = &s->pNext; + s = s->pNext; + } + } + + R = R->pNext; + } + + seq_enforce_uniqueness(G); +} + +void seq_extract_hierarchy(seq_rule_t *G) +{ + int next_rule = -2; + sym_t *cursym = G->ss; + + while (cursym) { + sym_t *m = NULL; + seq_rule_t *mr = NULL; + + if (cursym->pPrev && cursym->pPrev->pPrev) { + mr = seq_match_digram(G->pNext, cursym->pPrev, cursym); + if (mr) { + if (cursym->pPrev->r) { + cursym->pPrev->r->count--; + } + + if(cursym->r) { + cursym->r->count--; + } + + mr->count++; + + cursym->pPrev->r = mr; + cursym->pPrev->c = mr->c; + cursym->pPrev->pNext = cursym->pNext; + cursym->pNext->pPrev = cursym->pPrev; + cursym = cursym->pPrev; + } + + m = sym_match_digram(G->ss, cursym->pPrev->pPrev, cursym->pPrev, cursym); + if (m) { + seq_rule_t *newr; + + if (cursym->pPrev->r) { + cursym->pPrev->r->count--; + } + + if (cursym->r) { + cursym->r->count--; + } + + newr = seq_grammer_insert_new_rule(G, next_rule, m, m->pNext); + if (!newr) { + return; + } + + m->r = newr; + m->c = next_rule; + m->pNext = m->pNext->pNext; + m->pNext->pPrev = m; + + cursym->pPrev->r = newr; + cursym->pPrev->c = next_rule; + cursym->pPrev->pNext = 
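/*
 * Both seq_enforce_uniqueness and seq_merge_small_rules splice a rule's
 * body back into its caller, and in both the copied symbols get
 * "offset += s->offset": rule bodies store positions relative to the rule
 * start, so expansion rebases them by the occurrence's own offset.  In
 * miniature, with made-up numbers:
 */
#include <assert.h>

int main(void)
{
	int body[] = { 0, 16, 32 };   /* symbol offsets relative to rule start */
	int occurrence = 128;         /* offset of this use of the rule */
	int i;

	for (i = 0; i < 3; i++) {
		body[i] += occurrence;    /* the temp_itr / copylist_itr loops */
	}

	assert(body[0] == 128 && body[2] == 160);   /* now absolute */
	return 0;
}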
cursym->pNext; + cursym->pNext->pPrev = cursym->pPrev; + cursym = cursym->pPrev; + + next_rule--; + } + } + + if (!m && !mr) { + cursym = cursym->pNext; + } + } + + seq_enforce_uniqueness(G); + seq_merge_small_rules(G, 2); +// seq_enforce_uniqueness(G); +} + +void seq_compute_lengths(seq_rule_t *G) +{ + seq_rule_t *R = G->pNext; + sym_t *s; + int sum; + + while (R) { + sum = 0; + s = R->ss; + + while (s) { + if (s->c >= 0) { + if (s->offset + s->c > sum) { + sum = s->offset + s->c; + } + } + + if (s->c < 0) { + if (s->offset + s->r->length > sum) { + sum = s->offset + s->r->length; + } + } + + s = s->pNext; + } + + R->length = sum; + R = R->pNext; + } + + sum = 0; + s = G->ss; + + while (s) { + if (s->c >= 0) { + if (s->offset + s->c > sum) { + sum = s->offset + s->c; + } + } + + if (s->c < 0) { + if (s->offset + s->r->length > sum) { + sum = s->offset + s->r->length; + } + } + + s = s->pNext; + } + + G->length = sum; +} \ No newline at end of file -- cgit v1.1 From 18d19a9f8b4e409b4db46338c9040a61555f9c58 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 6 Nov 2014 11:38:55 +0200 Subject: Win64 actually "generate_size8_base_case" instead of copying --- src/codegen.c | 74 ++-- src/codegen_sse.h | 1059 +++++++++++++++++++++++++++++++++++++---------------- src/sequitur.h | 630 +++++++++++++++---------------- 3 files changed, 1090 insertions(+), 673 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 9d95519..4e70cb1 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -144,21 +144,21 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp = (insns_t*) p->transform_base; - /* generate base cases */ - x_4_addr = generate_size4_base_case(&fp, sign); - x_8_addr = generate_size8_base_case(&fp, sign); + /* generate base cases */ + x_4_addr = generate_size4_base_case(&fp, sign); + x_8_addr = generate_size8_base_case(&fp, sign); #ifdef __arm__ - start = generate_prologue(&fp, p); + start = generate_prologue(&fp, p); #else - start = generate_prologue(&fp, p); + start = generate_prologue(&fp, p); /* assign loop counter register */ loop_count = 4 * p->i0; #ifdef _M_X64 - MOVI(&fp, EBX, loop_count); + MOV_I(&fp, EBX, loop_count); #else - MOVI(&fp, ECX, loop_count); + MOV_I(&fp, ECX, loop_count); #endif #endif @@ -204,18 +204,18 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N //fprintf(stderr, "Body start address = %016p\n", start); #ifdef _M_X64 - /* generate function */ + /* generate function */ - /* clear */ - XOR2(&fp, EAX, EAX); - - /* set "pointer" to offsets */ - MOV(&fp, RDI, RCX, 0, 0); + /* clear */ + XOR2(&fp, EAX, EAX); - /* set "pointer" to constants */ - MOV(&fp, RSI, RCX, 0xE0, 0); + /* set "pointer" to offsets */ + MOV_D(&fp, RDI, RCX, 0, 0); - /* align loop/jump destination */ + /* set "pointer" to constants */ + MOV_D(&fp, RSI, RCX, 0xE0, 0); + + /* align loop/jump destination */ ffts_align_mem16(&fp, 8); #else /* copy function */ @@ -245,10 +245,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOVI(&fp, EBX, loop_count); + MOV_I(&fp, EBX, loop_count); ffts_align_mem16(&fp, 3); #else - MOVI(&fp, ECX, loop_count); + MOV_I(&fp, ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -298,10 +298,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOVI(&fp, EBX, loop_count); + MOV_I(&fp, EBX, loop_count); ffts_align_mem16(&fp, 3); #else - 
MOVI(&fp, ECX, loop_count); + MOV_I(&fp, ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -325,10 +325,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOVI(&fp, EBX, loop_count); + MOV_I(&fp, EBX, loop_count); ffts_align_mem16(&fp, 8); #else - MOVI(&fp, ECX, loop_count); + MOV_I(&fp, ECX, loop_count); ffts_align_mem16(&fp, 9); #endif @@ -343,38 +343,26 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += len; } -#ifdef _M_X64 - /* generate function */ - MOVAPS2(&fp, XMM3, RSI); - - /* set "pointer" to twiddle factors */ - MOV(&fp, RDI, RCX, 0x20, 0); -#else - /* copy function */ - assert((char*) x4 > (char*) x_init); - len = (char*) x4 - (char*) x_init; - memcpy(fp, x_init, len); - fp += len; -#endif + generate_transform_init(&fp); - /* generate subtransform calls */ + /* generate subtransform calls */ count = 2; while (pps[0]) { size_t ws_is; if (!pN) { #ifdef _M_X64 - MOVI(&fp, EBX, pps[0]); + MOV_I(&fp, EBX, pps[0]); #else - MOVI(&fp, ECX, pps[0] / 4); + MOV_I(&fp, ECX, pps[0] / 4); #endif } else { int offset = (4 * pps[1]) - pAddr; if (offset) { #ifdef _M_X64 - ADDI(&fp, R8, offset); + ADD_I(&fp, R8, offset); #else - ADDI(&fp, RDX, offset); + ADD_I(&fp, RDX, offset); #endif } @@ -394,9 +382,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - ADDI(&fp, RDI, offset); + ADD_I(&fp, RDI, offset); #else - ADDI(&fp, R8, offset); + ADD_I(&fp, R8, offset); #endif } @@ -701,7 +689,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N *fp++ = POP_LR(); count++; #else - generate_epilogue(&fp); + generate_epilogue(&fp); #endif // *fp++ = B(14); count++; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index abb5008..fa67a32 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -35,6 +35,7 @@ #define FFTS_CODEGEN_SSE_H #include +#include void neon_x4(float *, size_t, float *); void neon_x8(float *, size_t, float *); @@ -90,8 +91,8 @@ extern const uint32_t sse_leaf_oe_offsets[8]; #define XMM0 (XMM_REG | 0x0) #define XMM1 (XMM_REG | 0x1) -#define XMM2 (XMM_REG | 0x2) -#define XMM3 (XMM_REG | 0x3) +#define XMM2 (XMM_REG | 0x2) +#define XMM3 (XMM_REG | 0x3) #define XMM4 (XMM_REG | 0x4) #define XMM5 (XMM_REG | 0x5) #define XMM6 (XMM_REG | 0x6) @@ -111,7 +112,26 @@ extern const uint32_t sse_leaf_oe_offsets[8]; static void IMM8(uint8_t **p, int32_t imm); static void IMM32(uint8_t **p, int32_t imm); -static void ADDI(uint8_t **p, uint8_t dst, int32_t imm) +static FFTS_INLINE void ADDPS(uint8_t **p, uint8_t reg2, uint8_t reg1) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + /* esacape opcode */ + *(*p)++ = 0x0F; + + /* opcode */ + *(*p)++ = 0x58; + *(*p)++ = 0xC0 | r1 | (r2 << 3); +} + +/* Immediate */ +static void ADD_I(uint8_t **p, uint8_t dst, int32_t imm) { if (dst >= 8) { *(*p)++ = 0x49; @@ -201,297 +221,268 @@ static void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) ADDRMODE(p, dst, base, disp); } -static FFTS_INLINE void MOV(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) +static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x49; - } else { - 
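/*
 * The new packed-single helpers (ADDPS here, SUBPS/MULPS/XORPS/SHUFPS
 * below) share one register-register shape: optional REX, the 0x0F
 * escape, the operation opcode (0x58 for ADDPS), then ModRM with mod=11,
 * destination in the reg field and source in r/m.  So ADDPS(p, XMM0,
 * XMM1) yields 0F 58 C1, "addps xmm0, xmm1", checked standalone:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[3], *p = buf;
	uint8_t dst = 0, src = 1;                     /* xmm0, xmm1 */

	*p++ = 0x0F;                                  /* escape opcode */
	*p++ = 0x58;                                  /* ADDPS */
	*p++ = (uint8_t) (0xC0 | src | (dst << 3));   /* mod=11, reg=dst, rm=src */

	assert(buf[2] == 0xC1);
	return 0;
}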
*(*p)++ = 0x48; - } - - if (is_store) { - *(*p)++ = 0x89; - } else { - *(*p)++ = 0x8B; - } - - if (disp == 0) { - *(*p)++ = r2 | (r1 << 3); + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; - if (r2 == 4) { - *(*p)++ = 0x24; - } - } else if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r2 | (r1 << 3); + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } - if (r2 == 4) { - *(*p)++ = 0x24; - } + /* esacape opcode */ + *(*p)++ = 0x0F; - IMM8(p, disp); + /* opcode */ + if (is_store) { + *(*p)++ = 0x29; } else { - *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); - - if (r2 == 4) { - *(*p)++ = 0x24; - } - - IMM32(p, disp); + *(*p)++ = 0x28; } -} - -static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - /* opcode */ - if (is_store) { - *(*p)++ = 0x29; - } else { - *(*p)++ = 0x28; - } + r = r1 | (r2 << 3); - r = r1 | (r2 << 3); - - if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { - assert(disp == 0); - *(*p)++ = 0xC0 | r; - } else { - assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); + if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { + assert(disp == 0); + *(*p)++ = 0xC0 | r; + } else { + assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); - if (disp == 0 && r1 != 5) { - *(*p)++ = r; + if (disp == 0 && r1 != 5) { + *(*p)++ = r; - if (r1 == 4) { - *(*p)++ = 0x24; - } - } else { - if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r; + if (r1 == 4) { + *(*p)++ = 0x24; + } + } else { + if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r; - if (r1 == 4) { - *(*p)++ = 0x24; - } + if (r1 == 4) { + *(*p)++ = 0x24; + } - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r; + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r; - if (r1 == 4) { - *(*p)++ = 0x24; - } + if (r1 == 4) { + *(*p)++ = 0x24; + } - IMM32(p, disp); - } - } - } + IMM32(p, disp); + } + } + } } static FFTS_INLINE void MOVAPS2(uint8_t **p, uint8_t reg1, uint8_t reg2) { - if (reg1 & XMM_REG) { - MOVAPS(p, reg2, reg1, 0, 0); - } else { - MOVAPS(p, reg1, reg2, 0, 1); - } + if (reg1 & XMM_REG) { + MOVAPS(p, reg2, reg1, 0, 0); + } else { + MOVAPS(p, reg1, reg2, 0, 1); + } } static FFTS_INLINE void MOVAPS3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) { - if (reg1 & XMM_REG) { - MOVAPS(p, (uint8_t) op2, reg1, op3, 0); - } else { - MOVAPS(p, reg1, (uint8_t) op3, op2, 1); - } + if (reg1 & XMM_REG) { + MOVAPS(p, (uint8_t) op2, reg1, op3, 0); + } else { + MOVAPS(p, reg1, (uint8_t) op3, op2, 1); + } } static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; - /* mandatory prefix */ - *(*p)++ = 0x66; + /* mandatory prefix */ + *(*p)++ = 0x66; - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } - /* esacape opcode */ - *(*p)++ = 0x0F; + /* esacape opcode */ + *(*p)++ = 0x0F; - /* opcode */ - if (is_store) { - *(*p)++ = 0x7F; - } else { - *(*p)++ = 0x6F; - } + /* opcode */ + if (is_store) { + *(*p)++ = 0x7F; + } else { + *(*p)++ = 0x6F; + } - r = r1 | (r2 << 3); + 
r = r1 | (r2 << 3); - if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { - assert(disp == 0); - *(*p)++ = 0xC0 | r; - } else { - assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); + if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { + assert(disp == 0); + *(*p)++ = 0xC0 | r; + } else { + assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); - if (disp == 0 && r1 != 5) { - *(*p)++ = r; + if (disp == 0 && r1 != 5) { + *(*p)++ = r; - if (r1 == 4) { - *(*p)++ = 0x24; - } - } else { - if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r; + if (r1 == 4) { + *(*p)++ = 0x24; + } + } else { + if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r; - if (r1 == 4) { - *(*p)++ = 0x24; - } + if (r1 == 4) { + *(*p)++ = 0x24; + } - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r; + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r; - if (r1 == 4) { - *(*p)++ = 0x24; - } + if (r1 == 4) { + *(*p)++ = 0x24; + } - IMM32(p, disp); - } - } - } + IMM32(p, disp); + } + } + } } static FFTS_INLINE void MOVDQA2(uint8_t **p, uint8_t reg1, uint8_t reg2) { - if (reg1 & XMM_REG) { - MOVDQA(p, reg2, reg1, 0, 0); - } else { - MOVDQA(p, reg1, reg2, 0, 1); - } + if (reg1 & XMM_REG) { + MOVDQA(p, reg2, reg1, 0, 0); + } else { + MOVDQA(p, reg1, reg2, 0, 1); + } } static FFTS_INLINE void MOVDQA3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) { - if (reg1 & XMM_REG) { - MOVDQA(p, (uint8_t) op2, reg1, op3, 0); - } else { - MOVDQA(p, reg1, (uint8_t) op3, op2, 1); - } + if (reg1 & XMM_REG) { + MOVDQA(p, (uint8_t) op2, reg1, op3, 0); + } else { + MOVDQA(p, reg1, (uint8_t) op3, op2, 1); + } } -static void MOVI(uint8_t **p, uint8_t dst, uint64_t imm) +static FFTS_INLINE void MOV_D(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { - /* REX prefix */ - if (dst >= 8 || imm > UINT32_MAX) { - uint8_t val = 0x40; - - if (dst >= 8) { - val |= 1; - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); - if (imm > UINT32_MAX) { - val |= 8; - } + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x49; + } else { + *(*p)++ = 0x48; + } - *(*p)++ = val; + if (is_store) { + *(*p)++ = 0x89; + } else { + *(*p)++ = 0x8B; } - /* opcode */ - *(*p)++ = 0xb8 | (dst & 0x7); - - if (imm > UINT32_MAX) { - IMM64(p, imm); - } else { - IMM32(p, imm); - } -} + if (disp == 0) { + *(*p)++ = r2 | (r1 << 3); -static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; + if (r2 == 4) { + *(*p)++ = 0x24; + } + } else if (disp <= 127 && disp >= -128) { + *(*p)++ = 0x40 | r2 | (r1 << 3); - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } + if (r2 == 4) { + *(*p)++ = 0x24; + } - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0x59; - - r = r1 | (r2 << 3); + IMM8(p, disp); + } else { + *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); - if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { - assert(disp == 0); - *(*p)++ = 0xC0 | r; - } else { - assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); + if (r2 == 4) { + *(*p)++ = 0x24; + } - if (disp == 0 && r1 != 5) { - *(*p)++ = r; + IMM32(p, disp); + } +} - if (r1 == 4) { - *(*p)++ = 0x24; - } - } else { - if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r; +static void MOV_I(uint8_t **p, uint8_t dst, uint64_t imm) +{ + /* REX prefix */ + if (dst >= 8 || imm > UINT32_MAX) { + uint8_t val = 0x40; - if (r1 == 4) { - *(*p)++ = 0x24; - } + if (dst >= 8) { + val |= 1; + } - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r; + if (imm > UINT32_MAX) { + val |= 
8; + } - if (r1 == 4) { - *(*p)++ = 0x24; - } + *(*p)++ = val; + } - IMM32(p, disp); - } - } - } + /* opcode */ + *(*p)++ = 0xb8 | (dst & 0x7); + + if (imm > UINT32_MAX) { + IMM64(p, imm); + } else { + IMM32(p, imm); + } } -static FFTS_INLINE void MULPS2(uint8_t **p, uint8_t reg1, uint8_t reg2) +static FFTS_INLINE void MOV_R(uint8_t **p, uint8_t reg1, uint8_t reg2, int is_store) { - if (reg1 & XMM_REG) { - MULPS(p, reg2, reg1, 0, 0); - } else { - MULPS(p, reg1, reg2, 0, 1); - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x48 | ((reg2 & 8) >> 3) | ((reg1 & 8) >> 1); + } else { + *(*p)++ = 0x48; + } + + if (is_store) { + *(*p)++ = 0x89; + } else { + *(*p)++ = 0x8B; + } + + *(*p)++ = 0xC0 | r2 | (r1 << 3); + + if (r2 == 4) { + *(*p)++ = 0x24; + } } -static FFTS_INLINE void MULPS3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) +static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg2, uint8_t reg1) { - if (reg1 & XMM_REG) { - MULPS(p, (uint8_t) op2, reg1, op3, 0); - } else { - MULPS(p, reg1, (uint8_t) op3, op2, 1); - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + /* esacape opcode */ + *(*p)++ = 0x0F; + + /* opcode */ + *(*p)++ = 0x59; + *(*p)++ = 0xC0 | r1 | (r2 << 3); } static void POP(uint8_t **p, uint8_t reg) @@ -546,7 +537,48 @@ static void SHIFT(uint8_t **p, uint8_t reg, int shift) } } -static void SUBI(uint8_t **p, uint8_t dst, int32_t imm) +static FFTS_INLINE void SHUFPS(uint8_t **p, uint8_t reg2, uint8_t reg1, const int select) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + uint8_t r; + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + /* esacape opcode */ + *(*p)++ = 0x0F; + + /* opcode */ + *(*p)++ = 0xC6; + + r = r1 | (r2 << 3); + + *(*p)++ = 0xC0 | r; + *(*p)++ = (select & 0xFF); +} + +static FFTS_INLINE void SUBPS(uint8_t **p, uint8_t reg2, uint8_t reg1) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + /* esacape opcode */ + *(*p)++ = 0x0F; + + /* opcode */ + *(*p)++ = 0x5C; + *(*p)++ = 0xC0 | r1 | (r2 << 3); +} + +static void SUB_I(uint8_t **p, uint8_t dst, int32_t imm) { if (dst >= 8) { *(*p)++ = 0x49; @@ -571,12 +603,34 @@ static void SUBI(uint8_t **p, uint8_t dst, int32_t imm) static FFTS_INLINE void XOR2(uint8_t **p, uint8_t reg1, uint8_t reg2) { - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + *(*p)++ = 0x31; + *(*p)++ = 0xC0 | r2 | (r1 << 3); +} + +static FFTS_INLINE void XORPS(uint8_t **p, uint8_t reg2, uint8_t reg1) +{ + uint8_t r1 = (reg1 & 7); + uint8_t r2 = (reg2 & 7); + + /* REX prefix */ + if ((reg1 & 8) || (reg2 & 8)) { + *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); + } + + /* esacape opcode */ + *(*p)++ = 0x0F; - *(*p)++ = 0x31; - *(*p)++ = 0xC0 | (reg2 & 7) | ((reg1 & 7) << 3); + /* opcode */ + *(*p)++ = 0x57; + *(*p)++ = 0xC0 | r1 | (r2 << 3); } static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) @@ -662,78 +716,71 @@ static FFTS_INLINE void ffts_align_mem16(uint8_t **p, uint32_t offset) ffts_insert_nops(p, r); } 
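/*
 * SHUFPS with immediate 0xB1 (two-bit selectors 1,0,3,2 read from the low
 * end) swaps the real and imaginary lanes of two packed complex values;
 * combined with MULPS, SUBPS, ADDPS and an XORPS sign mask this is the
 * standard SSE complex multiply that the generated size-8 body applies to
 * its twiddle factors.  The scalar restatement for a single complex pair:
 */
#include <assert.h>

int main(void)
{
	float wr = 0.0f, wi = 1.0f;      /* twiddle w = i      */
	float xr = 3.0f, xi = 4.0f;      /* input   x = 3 + 4i */

	float yr = xr * wr - xi * wi;    /* MULPS, SHUFPS+MULPS, SUBPS */
	float yi = xr * wi + xi * wr;    /* the ADDPS half             */

	assert(yr == -4.0f && yi == 3.0f);   /* i * (3 + 4i) = -4 + 3i */
	return 0;
}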
-static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) -{ - insns_t *x_4_addr; - size_t len; - - /* align call destination */ - ffts_align_mem16(fp, 0); - x_4_addr = *fp; - - /* copy function */ - assert((char*) x8_soft > (char*) x4); - len = (char*) x8_soft - (char*) x4; - memcpy(*fp, x4, len); - *fp += len; - - return x_4_addr; -} - -static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) +static FFTS_INLINE void generate_epilogue(insns_t **fp) { - insns_t *x_8_addr; - size_t len; - - /* align call destination */ - ffts_align_mem16(fp, 0); - x_8_addr = *fp; - - /* align loop/jump destination */ #ifdef _M_X64 - ffts_align_mem16(fp, 6); + /* restore nonvolatile registers */ + MOVDQA3(fp, XMM6, RSP, 0); + MOVDQA3(fp, XMM7, RSP, 16); + MOVDQA3(fp, XMM8, RSP, 32); + MOVDQA3(fp, XMM9, RSP, 48); + MOVDQA3(fp, XMM10, RSP, 64); + MOVDQA3(fp, XMM11, RSP, 80); + MOVDQA3(fp, XMM12, RSP, 96); + MOVDQA3(fp, XMM13, RSP, 112); + MOVDQA3(fp, XMM14, RSP, 128); + MOVDQA3(fp, XMM15, RSP, 144); + + /* restore stack */ + ADD_I(fp, RSP, 168); + + /* restore the last 3 registers from the shadow space */ + MOV_D(fp, RBX, RSP, 8, 0); + MOV_D(fp, RSI, RSP, 16, 0); + MOV_D(fp, RDI, RSP, 24, 0); #else - ffts_align_mem16(fp, 5); + POP(fp, R15); + POP(fp, R14); + POP(fp, R13); + POP(fp, R12); + POP(fp, R11); + POP(fp, R10); + POP(fp, RBX); + POP(fp, RBP); #endif - /* copy function */ - assert((char*) x8_soft_end > (char*) x8_soft); - len = (char*) x8_soft_end - (char*) x8_soft; - memcpy(*fp, x8_soft, len); - *fp += len; - - return x_8_addr; + RET(fp); } static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) { - insns_t *start; + insns_t *start; - /* align call destination */ - ffts_align_mem16(fp, 0); - start = *fp; + /* align call destination */ + ffts_align_mem16(fp, 0); + start = *fp; /* save nonvolatile registers */ #ifdef _M_X64 /* use the shadow space to save first 3 registers */ - MOV(fp, RBX, RSP, 8, 1); - MOV(fp, RSI, RSP, 16, 1); - MOV(fp, RDI, RSP, 24, 1); + MOV_D(fp, RBX, RSP, 8, 1); + MOV_D(fp, RSI, RSP, 16, 1); + MOV_D(fp, RDI, RSP, 24, 1); /* reserve space.. 
*/ - SUBI(fp, RSP, 168); - - /* to save XMM6-XMM15 registers */ - MOVDQA3(fp, RSP, 0, XMM6); - MOVDQA3(fp, RSP, 16, XMM7); - MOVDQA3(fp, RSP, 32, XMM8); - MOVDQA3(fp, RSP, 48, XMM9); - MOVDQA3(fp, RSP, 64, XMM10); - MOVDQA3(fp, RSP, 80, XMM11); - MOVDQA3(fp, RSP, 96, XMM12); - MOVDQA3(fp, RSP, 112, XMM13); - MOVDQA3(fp, RSP, 128, XMM14); - MOVDQA3(fp, RSP, 144, XMM15); + SUB_I(fp, RSP, 168); + + /* to save XMM6-XMM15 registers */ + MOVDQA3(fp, RSP, 0, XMM6); + MOVDQA3(fp, RSP, 16, XMM7); + MOVDQA3(fp, RSP, 32, XMM8); + MOVDQA3(fp, RSP, 48, XMM9); + MOVDQA3(fp, RSP, 64, XMM10); + MOVDQA3(fp, RSP, 80, XMM11); + MOVDQA3(fp, RSP, 96, XMM12); + MOVDQA3(fp, RSP, 112, XMM13); + MOVDQA3(fp, RSP, 128, XMM14); + MOVDQA3(fp, RSP, 144, XMM15); #else PUSH(fp, RBP); PUSH(fp, RBX); @@ -745,43 +792,425 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) PUSH(fp, R15); #endif - return start; + return start; } -static FFTS_INLINE void generate_epilogue(insns_t **fp) +static FFTS_INLINE void generate_transform_init(insns_t **fp) { #ifdef _M_X64 - /* restore nonvolatile registers */ - MOVDQA3(fp, XMM6, RSP, 0); - MOVDQA3(fp, XMM7, RSP, 16); - MOVDQA3(fp, XMM8, RSP, 32); - MOVDQA3(fp, XMM9, RSP, 48); - MOVDQA3(fp, XMM10, RSP, 64); - MOVDQA3(fp, XMM11, RSP, 80); - MOVDQA3(fp, XMM12, RSP, 96); - MOVDQA3(fp, XMM13, RSP, 112); - MOVDQA3(fp, XMM14, RSP, 128); - MOVDQA3(fp, XMM15, RSP, 144); - - /* restore stack */ - ADDI(fp, RSP, 168); + /* generate function */ + MOVAPS2(fp, XMM3, RSI); - /* restore the last 3 registers from the shadow space */ - MOV(fp, RBX, RSP, 8, 0); - MOV(fp, RSI, RSP, 16, 0); - MOV(fp, RDI, RSP, 24, 0); + /* set "pointer" to twiddle factors */ + MOV_D(fp, RDI, RCX, 0x20, 0); #else - POP(fp, R15); - POP(fp, R14); - POP(fp, R13); - POP(fp, R12); - POP(fp, R11); - POP(fp, R10); - POP(fp, RBX); - POP(fp, RBP); + size_t len; + + /* copy function */ + assert((char*) x4 > (char*) x_init); + len = (char*) x4 - (char*) x_init; + memcpy(*fp, x_init, len); + *fp += len; #endif +} + +static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) +{ + insns_t *x_4_addr; + size_t len; + + /* align call destination */ + ffts_align_mem16(fp, 0); + x_4_addr = *fp; + /* copy function */ + assert((char*) x8_soft > (char*) x4); + len = (char*) x8_soft - (char*) x4; + memcpy(*fp, x4, len); + *fp += len; + + return x_4_addr; +} + +static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) +{ + insns_t *x_8_addr; +#ifdef _M_X64 + insns_t *x8_soft_loop; +#else + size_t len; +#endif + + /* align call destination */ + ffts_align_mem16(fp, 0); + x_8_addr = *fp; + + /* align loop/jump destination */ +#ifdef _M_X64 + ffts_align_mem16(fp, 6); +#else + ffts_align_mem16(fp, 5); +#endif + +#ifdef _M_X64 + /* input */ + MOV_R(fp, RDI, RAX, 1); + + /* output */ + MOV_R(fp, R8, RCX, 1); + + /* lea rdx, [r8 + rbx] */ + /* loop stop (output + output_stride) */ + *(*fp)++ = 0x49; + *(*fp)++ = 0x8D; + *(*fp)++ = 0x14; + *(*fp)++ = 0x18; + + /* lea rsi, [rbx + rbx*2] */ + /* 3 * output_stride */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x8D; + *(*fp)++ = 0x34; + *(*fp)++ = 0x5B; + + /* lea r10, [rbx + rbx*4] */ + /* 5 * output_stride */ + *(*fp)++ = 0x4C; + *(*fp)++ = 0x8D; + *(*fp)++ = 0x14; + *(*fp)++ = 0x9B; + + /* lea r11, [rsi + rbx*4] */ + /* 7 * output_stride */ + *(*fp)++ = 0x4C; + *(*fp)++ = 0x8D; + *(*fp)++ = 0x1C; + *(*fp)++ = 0x9E; + + x8_soft_loop = *fp; + assert(!(((uintptr_t) x8_soft_loop) & 0xF)); + + /* movaps xmm9, [rax] */ + /* input + 0 * input_stride */ + 
*(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x08; + + /* movaps xmm6, [rcx + rbx*2] */ + /* output + 2 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x34; + *(*fp)++ = 0x59; + + /* movaps xmm11, xmm9 */ + *(*fp)++ = 0x45; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xD9; + + /* movaps xmm7, [rcx + rsi] */ + /* output + 3 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x3C; + *(*fp)++ = 0x31; + + /* movaps xmm8, [rax + 0x10] */ + /* input + 1 * input_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x40; + *(*fp)++ = 0x10; + + MULPS(fp, XMM11, XMM6); + MULPS(fp, XMM9, XMM7); + SHUFPS(fp, XMM6, XMM6, 0xB1); + MULPS(fp, XMM6, XMM8); + SHUFPS(fp, XMM7, XMM7, 0xB1); + SUBPS(fp, XMM11, XMM6); + MULPS(fp, XMM8, XMM7); + + /* movaps xmm10, xmm11 */ + *(*fp)++ = 0x45; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xD3; + + ADDPS(fp, XMM9, XMM8); + + /* movaps xmm15, [rax + 0x20] */ + /* input + 2 * input_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x78; + *(*fp)++ = 0x20; + + ADDPS(fp, XMM10, XMM9); + SUBPS(fp, XMM11, XMM9); + + /* movaps xmm5, [rcx] */ + /* output + 0 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x29; + + /* movaps xmm6,xmm15 */ + *(*fp)++ = 0x41; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xF7; + + /* movaps xmm12, [rcx + rbx*4] */ + /* output + 4 * output_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x24; + *(*fp)++ = 0x99; + + /* movaps xmm2, xmm5 */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xD5; + + /* movaps xmm13, [rcx + rsi*2] */ + /* output + 6 * output_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x2C; + *(*fp)++ = 0x71; + + XORPS(fp, XMM11, XMM3); + + /* movaps xmm14, [rax + 0x30] */ + /* input + 3 * input_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x70; + *(*fp)++ = 0x30; + + SUBPS(fp, XMM2, XMM10); + MULPS(fp, XMM6, XMM12); + ADDPS(fp, XMM5, XMM10); + MULPS(fp, XMM15, XMM13); + + /* movaps xmm10, [rax + 0x40] */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x50; + *(*fp)++ = 0x40; + + /* movaps xmm0, xmm5 */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xC5; + + SHUFPS(fp, XMM12, XMM12, 0xB1); + SHUFPS(fp, XMM13, XMM13, 0xB1); + MULPS(fp, XMM12, XMM14); + MULPS(fp, XMM14, XMM13); + SUBPS(fp, XMM6, XMM12); + ADDPS(fp, XMM15, XMM14); + + /* movaps xmm7, [rcx + r10] */ + *(*fp)++ = 0x42; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x3C; + *(*fp)++ = 0x11; + + /* movaps xmm13, xmm10 */ + *(*fp)++ = 0x45; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xEA; + + /* movaps xmm8, [rcx + r11] */ + *(*fp)++ = 0x46; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x04; + *(*fp)++ = 0x19; + + /* movaps xmm12, xmm6 */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xE6; + + /* movaps xmm9, [rax + 0x50] */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x48; + *(*fp)++ = 0x50; + + /* input + 6 * input_stride */ + ADD_I(fp, RAX, 0x60); + + MULPS(fp, XMM13, XMM7); + SUBPS(fp, XMM6, XMM15); + ADDPS(fp, XMM12, XMM15); + MULPS(fp, XMM10, XMM8); + SUBPS(fp, XMM0, XMM12); + ADDPS(fp, XMM5, XMM12); + SHUFPS(fp, XMM7, XMM7, 0xB1); + XORPS(fp, XMM6, XMM3); + SHUFPS(fp, XMM8, XMM8, 0xB1); + + /* movaps xmm12, xmm2 */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xE2; + + MULPS(fp, 
XMM7, XMM9); + MULPS(fp, XMM9, XMM8); + SUBPS(fp, XMM13, XMM7); + ADDPS(fp, XMM10, XMM9); + + /* movaps xmm4, [rcx + rbx] */ + /* output + 1 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0x24; + *(*fp)++ = 0x19; + + SHUFPS(fp, XMM11, XMM11, 0xB1); + + /* movaps xmm1, xmm4 */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xCC; + + SHUFPS(fp, XMM6, XMM6, 0xB1); + ADDPS(fp, XMM1, XMM11); + SUBPS(fp, XMM4, XMM11); + ADDPS(fp, XMM12, XMM6); + SUBPS(fp, XMM2, XMM6); + + /* movaps xmm11, xmm13 */ + *(*fp)++ = 0x45; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xDD; + + /* movaps xmm14, xmm4 */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xF4; + + /* movaps xmm6, xmm1 */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x28; + *(*fp)++ = 0xF1; + + SUBPS(fp, XMM13, XMM10); + ADDPS(fp, XMM11, XMM10); + XORPS(fp, XMM13, XMM3); + ADDPS(fp, XMM4, XMM11); + SUBPS(fp, XMM14, XMM11); + SHUFPS(fp, XMM13, XMM13, 0xB1); + + /* movaps [rcx], xmm5 */ + /* output + 0 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x29; + + /* movaps [rcx + rbx], xmm4 */ + /* output + 1 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x24; + *(*fp)++ = 0x19; + + /* movaps [rcx + rbx*2], xmm2 */ + /* output + 2 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x14; + *(*fp)++ = 0x59; + + SUBPS(fp, XMM1, XMM13); + ADDPS(fp, XMM6, XMM13); + + /* movaps [rcx + rsi], xmm1 */ + /* output + 3 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x0C; + *(*fp)++ = 0x31; + + /* movaps [rcx + rbx*4], xmm0 */ + /* output + 4 * output_stride */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x04; + *(*fp)++ = 0x99; + + /* movaps [rcx + r10], xmm14 */ + /* output + 5 * output_stride */ + *(*fp)++ = 0x46; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x34; + *(*fp)++ = 0x11; + + /* movaps [rcx + rsi*2], xmm12 */ + /* output + 6 * output_stride */ + *(*fp)++ = 0x44; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x24; + *(*fp)++ = 0x71; + + /* movaps [rcx + r11], xmm6 */ + /* output + 7 * output_stride */ + *(*fp)++ = 0x42; + *(*fp)++ = 0x0F; + *(*fp)++ = 0x29; + *(*fp)++ = 0x34; + *(*fp)++ = 0x19; + + /* add rcx, 0x10 */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x83; + *(*fp)++ = 0xC1; + *(*fp)++ = 0x10; + + /* cmp rcx, rdx */ + *(*fp)++ = 0x48; + *(*fp)++ = 0x39; + *(*fp)++ = 0xD1; + + /* jne [x8_soft_loop] */ + *(*fp)++ = 0x0F; + *(*fp)++ = 0x85; + *(*fp)++ = 0x9E; + *(*fp)++ = 0xFE; + *(*fp)++ = 0xFF; + *(*fp)++ = 0xFF; + + /* ret */ RET(fp); +#else + /* copy function */ + assert((char*) x8_soft_end >= (char*) x8_soft); + len = (char*) x8_soft_end - (char*) x8_soft; + memcpy(*fp, x8_soft, len); + *fp += len; +#endif + + return x_8_addr; } #endif /* FFTS_CODEGEN_SSE_H */ \ No newline at end of file diff --git a/src/sequitur.h b/src/sequitur.h index 7429115..d459c56 100644 --- a/src/sequitur.h +++ b/src/sequitur.h @@ -32,417 +32,417 @@ */ typedef struct _sym_t { - int c; - struct _sym_t *pPrev, *pNext; - struct _seq_rule_t *r; - int offset; + int c; + struct _sym_t *pPrev, *pNext; + struct _seq_rule_t *r; + int offset; } sym_t; typedef struct _seq_rule_t { - int c; - sym_t *ss; - struct _seq_rule_t *pPrev, *pNext; - int count; - int length; + int c; + sym_t *ss; + struct _seq_rule_t *pPrev, *pNext; + int count; + int length; } seq_rule_t; void sym_tail_insert(sym_t **ss, sym_t *s) { - if (!*ss) { - *ss = s; - s->pPrev = s->pNext = NULL; - } else { - while (*ss) { - s->pPrev = *ss; - ss = &(*ss)->pNext; - } - - 
*ss = s; - } + if (!*ss) { + *ss = s; + s->pPrev = s->pNext = NULL; + } else { + while (*ss) { + s->pPrev = *ss; + ss = &(*ss)->pNext; + } + + *ss = s; + } } sym_t* sym_init(int c) { - sym_t *s; - - s = (sym_t*) malloc(sizeof(*s)); - if (!s) { - return NULL; - } - - s->c = c; - s->pPrev = s->pNext = NULL; - s->r = NULL; - - return s; + sym_t *s; + + s = (sym_t*) malloc(sizeof(*s)); + if (!s) { + return NULL; + } + + s->c = c; + s->pPrev = s->pNext = NULL; + s->r = NULL; + + return s; } sym_t* sym_init_from_sym(sym_t *s2) { - sym_t *s; + sym_t *s; - s = (sym_t*) malloc(sizeof(*s)); - if (!s) { - return NULL; - } + s = (sym_t*) malloc(sizeof(*s)); + if (!s) { + return NULL; + } - s->c = s2->c; - s->pPrev = s->pNext = NULL; - s->r = s2->r; - s->offset = s2->offset; + s->c = s2->c; + s->pPrev = s->pNext = NULL; + s->r = s2->r; + s->offset = s2->offset; - return s; + return s; } seq_rule_t* seq_init_rule(int c) { - seq_rule_t *G; - - G = (seq_rule_t *)malloc(sizeof(*G)); - if (!G) { - return NULL; - } - - G->c = c; - G->count = 2; - G->ss = NULL; - G->pPrev = NULL; - G->pNext = NULL; - - return G; + seq_rule_t *G; + + G = (seq_rule_t *)malloc(sizeof(*G)); + if (!G) { + return NULL; + } + + G->c = c; + G->count = 2; + G->ss = NULL; + G->pPrev = NULL; + G->pNext = NULL; + + return G; } seq_rule_t* seq_grammer_insert_new_rule(seq_rule_t *G, char r, sym_t *a, sym_t *b) { - sym_t *sa, *sb; + sym_t *sa, *sb; - while (G->pNext) { - G = G->pNext; - } + while (G->pNext) { + G = G->pNext; + } - G->pNext = seq_init_rule(r); - if (!G->pNext) { - return NULL; - } + G->pNext = seq_init_rule(r); + if (!G->pNext) { + return NULL; + } - sa = sym_init_from_sym(a); - if (!sa) { - goto cleanup_pnext; - } + sa = sym_init_from_sym(a); + if (!sa) { + goto cleanup_pnext; + } - sb = sym_init_from_sym(b); - if (!sb) { - goto cleanup_sa; - } + sb = sym_init_from_sym(b); + if (!sb) { + goto cleanup_sa; + } - sb->offset = sb->offset - sa->offset; - sa->offset = 0; - sym_tail_insert(&G->pNext->ss, sa); - sym_tail_insert(&G->pNext->ss, sb); - return G->pNext; + sb->offset = sb->offset - sa->offset; + sa->offset = 0; + sym_tail_insert(&G->pNext->ss, sa); + sym_tail_insert(&G->pNext->ss, sb); + return G->pNext; cleanup_sa: - free(sa); + free(sa); cleanup_pnext: - free(G->pNext); - G->pNext = NULL; + free(G->pNext); + G->pNext = NULL; - return NULL; + return NULL; } sym_t* sym_match_digram(sym_t *s, sym_t *term, sym_t *a, sym_t *b) { - while (s != term) { - if (s->c == a->c && s->pNext->c == b->c && - s->pNext->offset - s->offset == b->offset-a->offset) { - return s; - } + while (s != term) { + if (s->c == a->c && s->pNext->c == b->c && + s->pNext->offset - s->offset == b->offset-a->offset) { + return s; + } - s = s->pNext; - } + s = s->pNext; + } - return NULL; + return NULL; } seq_rule_t* seq_match_digram(seq_rule_t *R, sym_t *a, sym_t *b) { - while (R) { - if (R->ss->c == a->c && R->ss->pNext->c == b->c && - R->ss->pNext->offset - R->ss->offset == b->offset - a->offset) { - return R; - } + while (R) { + if (R->ss->c == a->c && R->ss->pNext->c == b->c && + R->ss->pNext->offset - R->ss->offset == b->offset - a->offset) { + return R; + } - R = R->pNext; - } + R = R->pNext; + } - return NULL; + return NULL; } sym_t* sym_tail(sym_t *s) { - while (s->pNext) { - s = s->pNext; - } + while (s->pNext) { + s = s->pNext; + } - return s; + return s; } int sym_count(sym_t *s) { - int count = 0; + int count = 0; - while (s) { - count++; - s = s->pNext; - } + while (s) { + count++; + s = s->pNext; + } - return count; + return count; } 
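As a quick orientation to the list primitives above, here is a minimal sketch of how they compose (the terminal values are chosen only for illustration; since the header defines these functions inline, including it from a single translation unit is enough):

#include <stdio.h>
#include <stdlib.h>
#include "sequitur.h" /* the header above */

int main(void)
{
    sym_t *list = NULL;
    int c;

    /* build the chain 'a' -> 'b' -> 'c' with the tail-insert helper */
    for (c = 'a'; c <= 'c'; c++) {
        sym_t *s = sym_init(c);
        if (!s)
            return EXIT_FAILURE;
        s->offset = c - 'a'; /* offsets here are illustrative */
        sym_tail_insert(&list, s);
    }

    printf("%d symbols, tail is '%c'\n", sym_count(list), sym_tail(list)->c);
    return EXIT_SUCCESS;
}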
sym_t* sym_copylist(sym_t *s) { - sym_t *head = NULL; - sym_t *prev = head; - - while (s) { - sym_t *copy = sym_init_from_sym(s); - if (!copy) { - return NULL; - } - - copy->pPrev = prev; - - if (prev) { - prev->pNext = copy; - } - - if (!head) { - head = copy; - } - - prev = copy; - s = s->pNext; - } - - return head; + sym_t *head = NULL; + sym_t *prev = head; + + while (s) { + sym_t *copy = sym_init_from_sym(s); + if (!copy) { + return NULL; + } + + copy->pPrev = prev; + + if (prev) { + prev->pNext = copy; + } + + if (!head) { + head = copy; + } + + prev = copy; + s = s->pNext; + } + + return head; } void seq_enforce_uniqueness(seq_rule_t *G) { - seq_rule_t *R = G;//->pNext; - seq_rule_t **ppr = &G->pNext; + seq_rule_t *R = G;//->pNext; + seq_rule_t **ppr = &G->pNext; - while (R) { - if (R == G || R->count > 1) { - sym_t *s = R->ss; - sym_t **pp = &R->ss; + while (R) { + if (R == G || R->count > 1) { + sym_t *s = R->ss; + sym_t **pp = &R->ss; - while (s) { - if (s->r && s->r->count == 1) { - sym_t *temp_itr; + while (s) { + if (s->r && s->r->count == 1) { + sym_t *temp_itr; + + *pp = s->r->ss; - *pp = s->r->ss; - temp_itr = s->r->ss; while (temp_itr) { temp_itr->offset += s->offset; temp_itr = temp_itr->pNext; } - - s->r->ss->pPrev = s->pPrev; - if (s->pNext) { - s->pNext->pPrev = sym_tail(s->r->ss); - } - - sym_tail(s->r->ss)->pNext = s->pNext; - s = s->r->ss; - continue; - } - - pp = &s->pNext; - s = s->pNext; - } - - ppr = &R->pNext; - } else { - *ppr = R->pNext; - } - - R = R->pNext; - } + + s->r->ss->pPrev = s->pPrev; + if (s->pNext) { + s->pNext->pPrev = sym_tail(s->r->ss); + } + + sym_tail(s->r->ss)->pNext = s->pNext; + s = s->r->ss; + continue; + } + + pp = &s->pNext; + s = s->pNext; + } + + ppr = &R->pNext; + } else { + *ppr = R->pNext; + } + + R = R->pNext; + } } void seq_merge_small_rules(seq_rule_t *G, int thresh) { - seq_rule_t *R = G; + seq_rule_t *R = G; - while (R) { - if (sym_count(R->ss) <= thresh) { + while (R) { + if (sym_count(R->ss) <= thresh) { //printf("count %d > %d for %d\n", sym_count(R->ss), thresh, R->c); - sym_t *s = R->ss; - sym_t **pp = &R->ss; - - while (s) { - if (s->r) { - sym_t *copylist; - sym_t *copylist_itr; - - s->r->count--; - - copylist = sym_copylist(s->r->ss); - if (!copylist) { - return; - } + sym_t *s = R->ss; + sym_t **pp = &R->ss; + + while (s) { + if (s->r) { + sym_t *copylist; + sym_t *copylist_itr; + + s->r->count--; + + copylist = sym_copylist(s->r->ss); + if (!copylist) { + return; + } copylist_itr = copylist; while (copylist_itr) { copylist_itr->offset += s->offset; copylist_itr = copylist_itr->pNext; } - - *pp = copylist; - copylist->pPrev = s->pPrev; - if (s->pNext) { - s->pNext->pPrev = sym_tail(copylist); - } - - sym_tail(copylist)->pNext = s->pNext; - pp = &(sym_tail(copylist)->pNext); - s = sym_tail(copylist)->pNext; - continue; - } - - pp = &s->pNext; - s = s->pNext; - } - } - - R = R->pNext; - } + + *pp = copylist; + copylist->pPrev = s->pPrev; + if (s->pNext) { + s->pNext->pPrev = sym_tail(copylist); + } + + sym_tail(copylist)->pNext = s->pNext; + pp = &(sym_tail(copylist)->pNext); + s = sym_tail(copylist)->pNext; + continue; + } + + pp = &s->pNext; + s = s->pNext; + } + } + + R = R->pNext; + } seq_enforce_uniqueness(G); } void seq_extract_hierarchy(seq_rule_t *G) { - int next_rule = -2; - sym_t *cursym = G->ss; - - while (cursym) { - sym_t *m = NULL; - seq_rule_t *mr = NULL; - - if (cursym->pPrev && cursym->pPrev->pPrev) { - mr = seq_match_digram(G->pNext, cursym->pPrev, cursym); - if (mr) { - if (cursym->pPrev->r) { - 
cursym->pPrev->r->count--; - } - - if(cursym->r) { - cursym->r->count--; - } - - mr->count++; - - cursym->pPrev->r = mr; - cursym->pPrev->c = mr->c; - cursym->pPrev->pNext = cursym->pNext; - cursym->pNext->pPrev = cursym->pPrev; - cursym = cursym->pPrev; - } - - m = sym_match_digram(G->ss, cursym->pPrev->pPrev, cursym->pPrev, cursym); - if (m) { - seq_rule_t *newr; - - if (cursym->pPrev->r) { - cursym->pPrev->r->count--; - } - - if (cursym->r) { - cursym->r->count--; - } - - newr = seq_grammer_insert_new_rule(G, next_rule, m, m->pNext); - if (!newr) { - return; - } - - m->r = newr; - m->c = next_rule; - m->pNext = m->pNext->pNext; - m->pNext->pPrev = m; - - cursym->pPrev->r = newr; - cursym->pPrev->c = next_rule; - cursym->pPrev->pNext = cursym->pNext; - cursym->pNext->pPrev = cursym->pPrev; - cursym = cursym->pPrev; - - next_rule--; - } - } - - if (!m && !mr) { - cursym = cursym->pNext; - } - } - - seq_enforce_uniqueness(G); - seq_merge_small_rules(G, 2); + int next_rule = -2; + sym_t *cursym = G->ss; + + while (cursym) { + sym_t *m = NULL; + seq_rule_t *mr = NULL; + + if (cursym->pPrev && cursym->pPrev->pPrev) { + mr = seq_match_digram(G->pNext, cursym->pPrev, cursym); + if (mr) { + if (cursym->pPrev->r) { + cursym->pPrev->r->count--; + } + + if(cursym->r) { + cursym->r->count--; + } + + mr->count++; + + cursym->pPrev->r = mr; + cursym->pPrev->c = mr->c; + cursym->pPrev->pNext = cursym->pNext; + cursym->pNext->pPrev = cursym->pPrev; + cursym = cursym->pPrev; + } + + m = sym_match_digram(G->ss, cursym->pPrev->pPrev, cursym->pPrev, cursym); + if (m) { + seq_rule_t *newr; + + if (cursym->pPrev->r) { + cursym->pPrev->r->count--; + } + + if (cursym->r) { + cursym->r->count--; + } + + newr = seq_grammer_insert_new_rule(G, next_rule, m, m->pNext); + if (!newr) { + return; + } + + m->r = newr; + m->c = next_rule; + m->pNext = m->pNext->pNext; + m->pNext->pPrev = m; + + cursym->pPrev->r = newr; + cursym->pPrev->c = next_rule; + cursym->pPrev->pNext = cursym->pNext; + cursym->pNext->pPrev = cursym->pPrev; + cursym = cursym->pPrev; + + next_rule--; + } + } + + if (!m && !mr) { + cursym = cursym->pNext; + } + } + + seq_enforce_uniqueness(G); + seq_merge_small_rules(G, 2); // seq_enforce_uniqueness(G); } void seq_compute_lengths(seq_rule_t *G) { - seq_rule_t *R = G->pNext; - sym_t *s; - int sum; - - while (R) { - sum = 0; - s = R->ss; - - while (s) { - if (s->c >= 0) { - if (s->offset + s->c > sum) { - sum = s->offset + s->c; - } - } - - if (s->c < 0) { - if (s->offset + s->r->length > sum) { - sum = s->offset + s->r->length; - } - } - - s = s->pNext; - } - - R->length = sum; - R = R->pNext; - } - - sum = 0; - s = G->ss; - - while (s) { - if (s->c >= 0) { - if (s->offset + s->c > sum) { - sum = s->offset + s->c; - } - } - - if (s->c < 0) { - if (s->offset + s->r->length > sum) { - sum = s->offset + s->r->length; - } - } - - s = s->pNext; - } - - G->length = sum; + seq_rule_t *R = G->pNext; + sym_t *s; + int sum; + + while (R) { + sum = 0; + s = R->ss; + + while (s) { + if (s->c >= 0) { + if (s->offset + s->c > sum) { + sum = s->offset + s->c; + } + } + + if (s->c < 0) { + if (s->offset + s->r->length > sum) { + sum = s->offset + s->r->length; + } + } + + s = s->pNext; + } + + R->length = sum; + R = R->pNext; + } + + sum = 0; + s = G->ss; + + while (s) { + if (s->c >= 0) { + if (s->offset + s->c > sum) { + sum = s->offset + s->c; + } + } + + if (s->c < 0) { + if (s->offset + s->r->length > sum) { + sum = s->offset + s->r->length; + } + } + + s = s->pNext; + } + + G->length = sum; } \ No newline at end 
of file -- cgit v1.1 From f898aaf891e175cc08d53f022acc9165e1acd2a6 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 6 Nov 2014 12:15:22 +0200 Subject: To prevent symbol name clashes we will prefix all symbols --- src/ffts.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/ffts.h b/src/ffts.h index 156a3b3..a8e27b8 100644 --- a/src/ffts.h +++ b/src/ffts.h @@ -46,6 +46,21 @@ #include #include +#define FFTS_PREFIX ffts + +#ifndef FFTS_CAT_PREFIX2 +#define FFTS_CAT_PREFIX2(a,b) a ## b +#endif + +#ifndef FFTS_CAT_PREFIX +#define FFTS_CAT_PREFIX(a,b) FFTS_CAT_PREFIX2(a ## _, b) +#endif + +/* prevent symbol name clashes */ +#ifdef FFTS_PREFIX +#define FUNC_TO_REWRITE FFTS_CAT_PREFIX(FFTS_PREFIX, FUNC_TO_REWRITE) +#endif + #ifdef __ANDROID__ #include #define LOG(s) __android_log_print(ANDROID_LOG_ERROR, "FFTS", s) -- cgit v1.1 From 8020e88dfc580bb8d99b60863dc4cf7e274a6f6e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 12:07:37 +0200 Subject: Added README and LICENSE text. Rebuild ChangeLog from Git logs --- ChangeLog | 5814 +++++++++++++++++++++++++++++++++++++++++++++++++------------ LICENSE | 21 + README | 7 + 3 files changed, 4746 insertions(+), 1096 deletions(-) create mode 100644 LICENSE create mode 100644 README diff --git a/ChangeLog b/ChangeLog index 231f796..c42aa63 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,1183 +1,4805 @@ -2010-05-19 Zoltan Varga +commit e8fa461503cf681fd7f6fffdbe94346cb4a0b94f +Author: Zoltan Varga +Date: Sat Sep 13 13:56:18 2014 -0400 - * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support. + [runtime] Remove an unused interpreter file. -2010-03-30 Zoltan Varga +commit b8e69265771d2d730847add35620628ff003aed1 +Author: Rodrigo Kumpera +Date: Tue Sep 9 09:14:37 2014 -0400 - * arm/*.sh: Remove bash dependency. + [cleanup] Remove more old files. -2009-08-14 Zoltan Varga +commit 69d89956fcc24cec955246588269cb7c8012b7cb +Author: Rodrigo Kumpera +Date: Mon Sep 1 13:25:07 2014 -0400 - * arm/arm-codegen.h: Add armv6 MOVW/MOVT. + [runtime] Remove the interpreter. -2009-07-03 Jerry Maine - - Contributed under the terms of the MIT/X11 license by - Jerry Maine . +commit a9db0d5b41d17cb7ff5788a63ce0eee1e01652b3 +Author: Neale Ferguson +Date: Tue Jun 3 11:52:00 2014 -0400 - * amd64/amd64-codegen.h: Added missing code gen marco for single packed square root. + Architectural level set to z10 instruction set -Fri Jul 24 16:54:13 CEST 2009 Steven Munroe +commit edeeadda807c9189ad6b7cdd0f221c355ad95e52 +Author: Alex Rønne Petersen +Date: Tue Apr 29 16:56:12 2014 +0200 - This patch is contributed under the terms of the MIT/X11 license - - * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted - conversion to support combining addis for bits 32-47 with - signed load/store diplacements for bits 48-63. - (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32. - These instructions are availble to 32-bit programs on 64-bit - hardware and 32-bit both starting with PowerISA V2.01. - [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6 - native mode. - [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for - ppc32. + Add .gitignore file in mono/arch/arm64. -2009-07-20 Zoltan Varga +commit 62b813772cfa4af873a278c39dd1f01dc6e50c2e +Author: Zoltan Varga +Date: Sat Apr 19 20:16:47 2014 +0200 - * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding - of this instruction. + [arm64] Add JIT support. 
-2009-07-13 Zoltan Varga +commit 1d58ec09524d6f4ce37f39698e68fb45a3c0231b +Author: Zoltan Varga +Date: Sat Apr 19 17:03:21 2014 +0200 - * x86/x86-codegen.h: Applied patch from Marian Salaj . - Fix encoding of PMINSW and PMINSD. Fixes #521662. + [arm64] Add basic port infrastructure. -2009-06-22 Zoltan Varga +commit 12741090edd2230bfd0fac498af3e304680380b4 +Author: Zoltan Varga +Date: Tue Apr 1 18:39:05 2014 +0000 - * ppc/ppc-codegen.h: Rework the naming of the load/store macros, - ldr/str now handle register sized quantities, while ldptr/stptr handle - pointer sized quantities. + [jit] Implement support for atomic intrinsics on arm. -2009-06-20 Zoltan Varga +commit 21ca1bad7d0447bb5d420a58128e1c2733635efa +Author: Rodrigo Kumpera +Date: Wed Dec 11 11:13:14 2013 -0500 - * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside - macros. - - * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities. - Handle little endian host platforms in ppc_emit32. + [arch]Add cvtsi2ss to amd64 codegen. -2009-10-06 Jerry Maine - - Contributed under the terms of the MIT/X11 license by - Jerry Maine . +commit 4a25d5fa1811be15c62979993cd1a37c2891d0a5 +Author: Alex Rønne Petersen +Date: Sat Nov 23 18:26:55 2013 +0100 - * amd64/amd64-codegen.h: Add marcos for coding two byte SIMD/SSE opcodes. - Added comments to help tell the different types of SSE code gen marcos appart. + Fix the encoding of x86_imul_reg_mem_imm. -2009-04-06 Zoltan Varga +commit 43b05e3c36d05526f7a9f3f8767569d026e4f1c6 +Author: Alex Rønne Petersen +Date: Fri Nov 15 15:08:06 2013 +0100 - * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD. + Fix the `nop` opcode on some MIPS-based Loongson CPUs. + + After much trouble building Mono in Debian/MIPS, @directhex + narrowed it down to this issue: + + https://sourceware.org/ml/binutils/2009-11/msg00387.html + + So since some of the 2E and 2F versions of the Loongson CPUs + break with a regular `sll zero, zero, 0` we need to issue an + `or at, at, 0`. This makes sure we don't randomly deadlock or + blow up when the CPU is under heavy load. + + Yes, really. -2009-04-03 Zoltan Varga +commit 2f56d471f089b8f514377ce501a0c1643652d639 +Author: Zoltan Varga +Date: Fri May 24 23:41:39 2013 +0200 - * amd64/amd64-codegen.h: Add macros for decoding the SIB byte. + Merge some Nacl/ARM changes from https://github.com/igotti-google/mono/commit/65d8d68e8c81cf6adb1076de7a9425c84cab86a3. -2009-04-02 Zoltan Varga +commit ab6a96ef346220433f9f7967b763a0453d9cbc66 +Author: Zoltan Varga +Date: Tue May 14 18:27:32 2013 +0200 - * arm/arm-vfp-codegen.h: Add missing VFP codegen macros. + Enable hw division/remainder on mt in non-thumb mode as well. -2009-03-06 Zoltan Varga +commit 78c1e65942210449d0d1c4957b42242ebc9bdb5a +Author: Alex Rønne Petersen +Date: Tue May 14 03:10:43 2013 +0200 - * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing - a file in the middle of a function. + Kill support for the ancient FPA format on ARM. -2009-02-27 Zoltan Varga +commit a42bc8f14a3393150fb6fbb772c2b0259267f5ae +Author: Neale Ferguson +Date: Thu Apr 25 10:01:14 2013 -0400 - * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are - autogenerated. + Add lazy rgctx support to s390x -2009-02-02 Mark Probst +commit 92b3dc346aad94e7e6a91e7356adcebbb180c618 +Author: Zoltan Varga +Date: Mon Apr 22 17:54:27 2013 +0200 - Contributed under the terms of the MIT/X11 license by Steven - Munroe . + Remove obsolete 32 bit s390 backend. 
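For context on the Loongson `nop` entry above: both the broken and the safe nop are ordinary R-type words, and the fix simply swaps which word the emitter writes (in Mono that emitter is presumably the nop macro in mips/mips-codegen.h). A standalone sketch of the two encodings:

#include <stdint.h>
#include <stdio.h>

/* Classic MIPS nop: sll zero, zero, 0 -- the all-zeroes word. */
#define MIPS_NOP_SLL 0x00000000u

/* Loongson-safe nop: or at, at, zero
 * SPECIAL (0) | rs=at (1) | rt=zero (0) | rd=at (1) | shamt=0 | funct=OR (0x25) */
#define MIPS_NOP_OR ((0u << 26) | (1u << 21) | (0u << 16) | (1u << 11) | 0x25u)

int main(void)
{
    printf("sll zero, zero, 0 = 0x%08X\n", (unsigned) MIPS_NOP_SLL);
    printf("or  at, at, zero  = 0x%08X\n", (unsigned) MIPS_NOP_OR); /* 0x00200825 */
    return 0;
}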
- * ppc/ppc-codegen.h: Make operand order and case consistent - (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs, - ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz, - ppc_stw,ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu, - ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw, - ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use - "i" or "ui" instead of "d" for immediated operands to immediate - arthimetic and logical instructions in macros ppc_addi, ppc_addis, - ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd. - [__mono_ppc64__]: Make operand order and case consistent - (assembler order) for ppc_load_multiple_regs, - ppc_store_multiple_regs. Simplify the DS form and make them - consistent with D forms for ppc_load_reg, ppc_load_reg_update, - ppc_store_reg, ppc_store_reg_update. ppc_ld, ppc_lwa, ppc_ldu, - ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux. +commit 0d9d79945bfc7e791ed39e7519b8769a3c09fe28 +Author: Elijah Taylor +Date: Thu Jan 31 12:48:49 2013 -0800 -2009-01-19 Rodrigo Kumpera + NaCl GC improvements + + - inline managed code implementation + (add x86 test mem imm8 codegen macro for this as well) + - clean up libgc NaCl code + - centralize mono_nacl_gc into mini.c - * x86/x86-codegen.h: Add x86_movd_xreg_membase. +commit a2b380c30f8e12e508d9b761b9b049d17dff3617 +Author: Zoltan Varga +Date: Fri Mar 1 20:27:07 2013 +0100 -2008-12-19 Mark Probst + Remove the unmaintained and incomplete alpha backend. - * ppc/ppc-codegen.h: Fixed the argument order for lwzu in - ppc_load_reg_update. +commit ddee8bb5125ad07f673a5f9a45ddc629dec8c126 +Author: Zoltan Varga +Date: Tue Feb 26 22:08:26 2013 +0100 -2008-12-12 Mark Mason + Remove the unmaintained and incomplete hppa backend. - * mips/mips-codegen.h: Changes to support n32. - -2008-12-10 Mark Probst +commit 9c434db79ba98565a8dadcfbbe8737621a698589 +Author: Rodrigo Kumpera +Date: Tue Oct 9 17:23:38 2012 -0400 - * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64 - merge. + Use full path for includes as this was breaking the cross compiler. -2008-12-05 Mark Probst +commit 600580c96563f5702acee5a0307432e96731d837 +Author: Zoltan Varga +Date: Thu Oct 4 13:03:06 2012 +0200 - * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence. - Added ppc_compare_log. + Save fp registers in the ARM throw trampoline, ios has callee saved fp registers, and LLVM generates code which uses them. -2008-12-05 Mark Probst +commit 0b64268e0a56e3f76063f0b679975be0daaf68b1 +Author: Zoltan Varga +Date: Wed Oct 3 10:26:37 2012 +0200 - * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added - ppc_load/store_multiple_regs and ppc_compare_reg_imm. + Use AM_CPPFLAGS instead of INCLUDES in Makefile.am files, as the latter is no longer supported, see http://lists.gnu.org/archive/html/automake/2012-08/msg00087.html. -2008-12-02 Mark Probst +commit f2e43c392dde726d2f1008dfcc8515d34354e968 +Author: Zoltan Varga +Date: Wed Sep 19 01:37:26 2012 +0000 - * ppc/ppc-codegen.h: Opcodes for floating point conversions from - 64 bit integers. + Save/restore fp registers in MonoContext on ios. Fixes #1949. - Code submitted by andreas.faerber@web.de at - https://bugzilla.novell.com/show_bug.cgi?id=324134 under the - X11/MIT license.
+commit a841c76b86e38fc8e5db24f152b5fab2501ddf1a +Author: Iain Lane +Date: Sun Apr 15 14:49:55 2012 +0100 -2008-11-28 Mark Probst + Fix ARM printf format problems + + When building with -Werror=format-security on ARM, mono fails to build + due to incorrect format strings in arm-dis.c - * ppc/ppc-codegen.h: #define for the maximum length of a load - sequence. +commit 33426abe6bd7ad8eb37d2f214afe08a0a3d70a0b +Author: Neale Ferguson +Date: Mon Apr 2 13:30:43 2012 -0400 -2008-11-21 Mark Probst + s390x-codegen.h - Define s390_SP and s390_BP + sgen-major-copy-object.h - Correct assertion test + sgen-os-posix.c - Prevent race condition between restarting and suspending a thread - * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit - values. +commit c565eab0f9d79f6009c3878eaa190529838b0204 +Author: Miguel de Icaza +Date: Mon Mar 12 16:15:46 2012 -0400 -2008-11-20 Rodrigo Kumpera + Update some copyrights - * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD. +commit d711efe0d6403fa49697c304696843a789805112 +Author: Zoltan Varga +Date: Fri Dec 2 06:20:16 2011 +0000 -2008-11-20 Rodrigo Kumpera + Ongoing MIPS work. Fix mips_load () to be patchable, fix endianness issue in OP_MIPS_MFC1D, fix OP_JMP. make rcheck runs now. - * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants. +commit 32a164a381080aee3afa42ea33e31d89579519a4 +Author: Zoltan Varga +Date: Wed Nov 16 04:35:31 2011 -0500 - * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm. + Revert "Add support for hardfp abi on ARM." + + This reverts commit e7055b45b9211fb20021997f7da0fa24992421f5. - * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently. +commit aaae806b8bd16a82937c9417689aeb82bea0b952 +Author: Miguel de Icaza +Date: Wed Nov 9 10:25:48 2011 -0500 -2008-11-20 Mark Probst + Update two days worth of copyrights, many more missing - * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in - ppc_load_func to fix the 2 bit shift. +commit 96e5ba7724999828facefb30e0982d0be6931bda +Author: Zoltan Varga +Date: Wed Nov 9 01:13:16 2011 +0100 -2008-11-20 Mark Probst + Add support for hardfp abi on ARM. - * ppc/ppc-codegen.h: 64 bit division opcodes. +commit c6d53e16991eb2dcc3e4d99a008fdd899d2b78f2 +Author: Elijah Taylor +Date: Fri Aug 5 17:02:45 2011 +0200 - Code submitted by andreas.faerber@web.de at - https://bugzilla.novell.com/show_bug.cgi?id=324134 under the - X11/MIT license. + Fix up bugs in x86-codegen for NaCl. -2008-11-20 Zoltan Varga +commit 8034d4b8f49485babcbffd12d3e09fd372c00ccb +Author: Zoltan Varga +Date: Wed Jul 6 16:16:16 2011 +0200 - * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only - used by the interpreter. + Prefix ARM FPA codegen macros with 'FPA'. -2008-11-20 Rodrigo Kumpera +commit d2a95b8feb24584dd528b3deb0f5f1ec5d7766a3 +Author: Zoltan Varga +Date: Thu Jun 23 21:33:43 2011 +0200 - * x86/x86-codegen.h: Add PEXTR B/W/D. + Fix out-of-tree builds on arm. -2008-11-18 Rodrigo Kumpera +commit d093f6fff2bcaa4ccfc795354b151c7ca1a0c613 +Author: Neale Ferguson +Date: Fri May 6 12:52:19 2011 -0400 - * x86/x86-codegen.h: Add PINSR B/W/D. + Implement soft debugger for s390x and fix context macro for s390x -2008-11-18 Mark Probst +commit 4c9723aa3efac03bc33deed252ebda71cbb1ae86 +Author: Zoltan Varga +Date: Tue Mar 8 12:14:52 2011 +0100 - * ppc/ppc-codegen.h: Macro for nop added. + Fix some warnings. 
-2008-11-18 Mark Probst +commit b1a613aca13e03185d0ba49e46fd77fd8eb98fc9 +Author: Zoltan Varga +Date: Sun Feb 20 03:22:52 2011 +0100 - * ppc/ppc-codegen.h: PPC64 code generation macros. + Implement mono_memory_barrier () and OP_MEMORY_BARRIER for ARM. - Based on code submitted by andreas.faerber@web.de at - https://bugzilla.novell.com/show_bug.cgi?id=324134 under the - X11/MIT license. +commit f81e3005a53a10c39f4ca8dd30a2a88719c7d005 +Author: Neale Ferguson +Date: Sun Jan 16 23:40:23 2011 -0500 -2008-11-18 Mark Probst + Cast result of s390x_emit16/32 to eliminate lots of warning messages + Check for wrapper-managed-to-native when assessing call parameters and have emit_prolog use native_size when processing those parameters + Signed-off-by: Neale Ferguson - * ppc/ppc-codegen.h: A few fixes and additions. +commit 92a55ae009739b5ec652676b8fdd615375c27fc0 +Author: Rodrigo Kumpera +Date: Mon Jan 10 10:52:46 2011 +0000 - Based on code submitted by andreas.faerber@web.de at - https://bugzilla.novell.com/show_bug.cgi?id=324134 under the - X11/MIT license. + Implement mono.simd new conversion ops on amd64 -2008-11-17 Rodrigo Kumpera +commit b7639e01d7603a1e34dd225edb5e99fd2181494b +Author: Rodrigo Kumpera +Date: Mon Jan 10 10:40:12 2011 +0100 - * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ. + Implement a few conversion operations. + + Add conversion operations between 4f, 2d and 4i. + Implemented only on x86 for now. - * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg - macro. +commit f0e5c2be6946491ba052c82794361ec0d33cb04c +Author: Rodrigo Kumpera +Date: Fri Jan 7 00:19:03 2011 +0000 -2008-11-17 Rodrigo Kumpera + AMD64 version of the new mono.simd ops - * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant - and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros. +commit 1aa6254fb828e043ea55d7d3e37b02812e2d9bdf +Author: Rodrigo Kumpera +Date: Thu Jan 6 21:36:31 2011 +0100 -2008-11-15 Andreas Faerber + Implement Shuffle for 64bits types. + + * x86-codegen.h: Add macro and define to emit pshufpd. + + * mini-ops.h: Add OP_SHUPD. + + * cpu-x86.md: + * mini-x86.h: Implement x86 support. + + * simd-intrinsics.c: Handle shuffle on 64bit types. + + * VectorOperations.cs: Add new methods. - * ppc/test.c: Add support for Mac OS X. +commit c1fb94e7e72e58924dcebe8cdfcdbcbe1e65b644 +Author: Rodrigo Kumpera +Date: Thu Jan 6 18:43:59 2011 +0100 - Code is contributed under MIT/X11 license. + Add SHUFPS and macro to emit it. -2008-11-13 Rodrigo Kumpera +commit 48f5efeb334eb4b6e867c65ae53e21b3c45fd771 +Author: Zoltan Varga +Date: Thu Jan 6 19:35:45 2011 +0100 - * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg. + Put back a macro definition accidently removed by the nacl changes. -2008-11-04 Rodrigo Kumpera - - * x86/x86-codegen.h: Add store nta. - -2008-11-03 Rodrigo Kumpera - - * x86/x86-codegen.h: Add prefetch instruction - and x86_sse_alu_reg_membase macro. - -2008-10-28 Rodrigo Kumpera - - * x86/x86-codegen.h: Add long version of the packed integer - ops. - -2008-10-27 Rodrigo Kumpera - - * x86/x86-codegen.h: Add movddup. - -2008-10-24 Rodrigo Kumpera - - * x86/x86-codegen.h: Add signed pack with saturation. - -2008-10-24 Rodrigo Kumpera - - * x86/x86-codegen.h: Add signed packed mul high. - -2008-10-23 Rodrigo Kumpera - - * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation - and compare greater. - -2008-10-20 Rodrigo Kumpera - - * x86/x86-codegen.h: Add multiply and store high. 
- -2008-10-17 Rodrigo Kumpera - - * x86/x86-codegen.h: Add packed int shuffle. - -2008-10-16 Rodrigo Kumpera - - * x86/x86-codegen.h: Add packed int compare equals and - psabw. - -2008-10-15 Rodrigo Kumpera - - * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask. - -2008-10-14 Rodrigo Kumpera - - * x86/x86-codegen.h: Add movsldup and movshdup. - -2008-10-13 Rodrigo Kumpera - - * x86/x86-codegen.h: Add remaining FP sse1 ops. - Add sse ps encoding with imm operand. - Add remaining sse1 ops. - -2008-10-12 Rodrigo Kumpera - - * x86/x86-codegen.h: Add macro for sse41 ops. - Add defined for pack ops, dword shifts/mul/pack. - -2008-10-11 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support. - -2008-10-10 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets. - -2008-10-09 Rodrigo Kumpera - - * x86/x86-codegen.h: Add macros for sse shift, pack, unpack, - saturated math and packed byte/word/dword math. - -2008-10-03 Rodrigo Kumpera - - * x86/x86-codegen.h: Add macros for SSE instructions. - -2008-09-27 Mark Probst - - * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings. - -2008-09-07 Mark Probst - - * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some - warnings. - -2008-06-16 Mark Probst - - * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro - nobody uses. - -2008-06-06 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the - instructions recommended by the amd64 manual. - -2008-04-19 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of - win64. - -2008-03-13 Geoff Norton - - * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX - -2008-02-18 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro. - -2008-02-14 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro. - -2008-02-08 Zoltan Varga - - * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes - so they are consistent. - -2008-01-24 Zoltan Varga - - * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true. - - * Makefile.am (SUBDIRS): Only set this on arm. - -2007-11-20 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller - instruction encoding. - -2007-11-03 David S. Miller - - * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi - can be used if the constant value only has the top 22 bits set. - -2007-11-01 Geoff Norton - - * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true - Fixes the build on Leopard. - -2007-11-01 Geoff Norton - - * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true - Fixes the build on Leopard. - -2007-10-26 Jonathan Chambers - - * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG# - defines to access param registers. Replace long usage with - gsize as sizeof(long) != sizeof(void*) on Win64. - - Code is contributed under MIT/X11 license. - -2007-10-09 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unneccesary - rex prefix which trips up valgrind. - -2007-07-14 Zoltan Varga - - * amd64/amd64-codegen.h: Remove some unused rex prefixes. - -Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro - - * x86/x86-codegen.h: added minimal sse instructions currently - needed by the JIT. - -2007-06-13 Randolph Chung - - * hppa/hppa-codegen.h: Update with more instructions. 
- * hppa/tramp.c: Disable for linux since we don't support the - interpreter. - -2007-05-20 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed. - -2007-04-23 Zoltan Varga - - * alpha/alpha-codegen.h: More alpha port work from - Sergey Tikhonov . - -Mon Mar 12 17:07:32 CET 2007 Paolo Molaro - - * amd64/amd64-codegen.h: removed some useless size rex prefixes. - -Wed Jan 24 21:00:40 CET 2007 Paolo Molaro - - * arm/arm-codegen.h: fixed encoding of short/byte load/store - instructions with negative immediate offsets. - -Mon Nov 20 17:36:45 CET 2006 Paolo Molaro - - * arm/arm-codegen.h: added suppot for thumb interworking instructions. - -Wed Nov 15 16:56:53 CET 2006 Paolo Molaro - - * mips/*: fixes by Mark E Mason . - -2006-09-12 Zoltan Varga - - * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov . - -Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro - - * arm/*: VFP floating point format code generation support. - -2006-09-12 Zoltan Varga - - * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops. - -2006-07-19 Zoltan Varga - - * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg. - - * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from - Sergey Tikhonov . Updates to alpha support. - -Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro - - * ppc/ppc-codegen.h: reduce noisy build warnings by - casting to the more commonly used unsigned char type - (from johannes@sipsolutions.net (Johannes Berg)). - -2006-05-14 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this - opcode. - -2006-04-21 Zoltan Varga - - * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old - behaviour. - -2006-04-12 Zoltan Varga - - * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro. - -2006-04-04 Zoltan Varga - - * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the - interpreter. - -2005-12-22 Zoltan Varga - - * sparc/sparc-codegen.h (sparc_membar): Add membar instruction. - -2005-10-30 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_m17): Fix a warning. - -2005-10-16 Zoltan Varga - - * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp. - -2005-09-27 Raja R Harinath - - * arm/dpiops.sh, arm/fpaops.h: Output to stdout. - * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix - for srcdir != builddir. - -2005-09-11 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro. - -2005-09-10 Zoltan Varga - - * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions. - Integrate emission of unwind directives into the assembly macros. - -2005-09-04 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_no_stop): New macro. - -2005-08-27 Zoltan Varga - - * ia64/ia64-codegen.h: Fix some bugs. - - * ia64/codegen.c: Update to work with latest ia64-codegen.h - -2005-08-26 Zoltan Varga - - * ia64/Makefile.am: Distribute ia64-codegen.h. - -2005-08-21 Zoltan Varga - - * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs. - -2005-08-17 Zoltan Varga - - * ia64/ia64-codegen.h: Add dependency information for all instructions. - -2005-07-30 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -Wed Jul 20 18:01:54 BST 2005 Paolo Molaro - - * arm/*: more codegen macros. - -2005-07-18 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work. - -2005-06-23 Zoltan Varga - - * ia64/ia64-codegen.h: Add some new pseudo ops. - -2005-06-19 Zoltan Varga - - * ia64/ia64-codegen.h: Fix encoding of ia64_fclass. 
- -2005-06-12 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-06-09 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-30 Zoltan Varga - - * ia64/codegen.c: Fix it after latest changes. - -2005-05-29 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-28 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - - * ia64/ia64-codegen.h: Ongoing IA64 work. - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-26 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-22 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - - * ia64/ia64-codegen.h: Ongoing IA64 work. - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-21 Zoltan Varga - - * ia64/ia64-codegen.h: Ongoing IA64 work. - -2005-05-19 Zoltan Varga - - * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work. - -2005-05-18 Zoltan Varga - - * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter. - -2005-05-14 Zoltan Varga - - * Makefile.am: Only compile libmonoarch if the interpreter is compiled. - - * ia64/ia64-codegen.h: Add IA64 code generation macros. - - * Makefile.am: Add ia64 subdir. - -2005-05-05 Zoltan Varga - - * alpha/tramp.c: Applied patch from Jakub Bogusz . - -2005-03-28 Zoltan Varga - - * amd64/amd64-codegen.h: Avoid emitting a rex in some places. - -2005-03-15 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the - byte registers. - - * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro. - -2005-03-14 Zoltan Varga - - * amd64/amd64-codegen.h: Add missing AMD64_XMM7. - -2005-03-13 Zoltan Varga - - * amd64/amd64-codegen.h: Remove some unneccesary REXes. - -2005-03-08 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size - variants to some sse2 macros. - - * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert - to a 64 bit value. - -2005-03-06 Zoltan Varga - - * amd64/amd64-codegen.h: Add some SSE2 instructions. - -2005-02-20 Zoltan Varga - - * amd64/amd64-codegen.h: Add xadd instructions. - - * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex. - -2004-11-25 Zoltan Varga - - * amd64/amd64-codegen.h: Updates to support the PIC changes. - -Fri Nov 19 17:29:22 CET 2004 Paolo Molaro - - * ppc/ppc-codegen.h: counter reg decrement branch values - (patch by Geoff Norton ). - -2004-11-16 Patrik Torstensson - - * x86/x86-codegen.h: added opcodes for xadd instructions - -2004-11-15 Zoltan Varga - - * amd64/x86-64-codegen.h: Get rid of this. - -2004-08-30 Zoltan Varga - - * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX - generation. - -2004-08-29 Zoltan Varga - - * amd64/amd64-codegen.h: More SSE work. - - * amd64/amd64-codegen.h: Add SSE2 instructions. - -2004-08-21 Zoltan Varga - - * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG - since under amd64, all 16 registers have a low part. - -2004-08-16 Zoltan Varga - - * x86/x86-codegen.h: Add macros for accessing the mod/rm byte. - -2004-07-30 Zoltan Varga - - * amd64/amd64-codegen.h: Ongoing JIT work. - -2004-07-29 Ben Maurer - - * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm - -2004-07-28 Zoltan Varga - - * amd64/amd64-codegen.h: Ongoing JIT work. - -2004-07-24 Zoltan Varga - - * amd64/amd64-codegen.h: Ongoing JIT work. - -2004-07-23 Zoltan Varga - - * amd64/amd64-codegen.h: Ongoing JIT work. - -2004-07-23 zovarga - - * amd64/amd64-codegen.h: Ongoing JIT work. 
- -2004-06-30 Zoltan Varga - - * sparc/sparc-codegen.h: Add SPARC64 support. - -2004-05-07 Bernie Solomon - - * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32 - -2004-04-29 Bernie Solomon - - * ppc/tramp.c: use sizeof (stackval), fix - delegate tramp frame layout for Apple - -2004-04-26 David Waite - - * unknown.c: modify to have content for defined platforms (to - avoid ISO C warning) - -2004-03-29 Bernie Solomon - - * amd64/tramp.c: - * arm/tramp.c: - * hppa/tramp.c: - * ppc/tramp.c: - * s390/tramp.c: - * sparc/tramp.c: - * x86/tramp.c: - remove child from MonoInvocation as it isn't used. - - * hppa/hppa-codegen.h: - fix displacements in FP instrs - -2004-03-23 Bernie Solomon - - * hppa/hppa-codegen.h: created - - * hppa/tramp.c: changed style to be more like - other platforms. - - * hppa/Makefile.am: add hppa-codegen.h - -2004-03-16 Zoltan Varga - - * sparc/sparc-codegen.h: Add v9 branches with prediction. - -2004-03-15 Bernie Solomon - - * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg - so Sun's dis command recognizes it. - -2004-03-15 Zoltan Varga - - * sparc/sparc-codegen.h: Add some v9 instructions. - -2004-03-11 Zoltan Varga - - * sparc/sparc-codegen.h: Ongoing sparc work. - -2004-03-07 Duncan Mak - - * Makefile.am: Removed the reference to 'x86-64'. This was the cause - of the missing Mono daily tarballs, 'make dist' wasn't working. - - We do have an 'amd64' directory, but it doesn't make it in 'make - dist'. - -2004-02-19 Zoltan Varga - - * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones. - - * sparc/tramp.c: Fix alignment of structures containing doubles. - -2004-02-02 Zoltan Varga - - * sparc/tramp.c: Implement all floating point argument passing conventions in - Sparc V8. Also fix structure passing in V8. - -Thu Nov 13 16:24:29 CET 2003 Paolo Molaro - - * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct. - -2003-10-31 Zoltan Varga - - * */tramp.c (mono_create_method_pointer): Rename to - mono_arch_create_method_pointer, move common code to a new function in - interp.c. - - * */tramp.c (mono_create_trampoline): Rename to - mono_arch_create_trampoline for consistency. - -2003-10-13 Bernie Solomon - - * x86/tramp.c: restore EDX after memcpy call - -2003-10-13 Bernie Solomon - - * Makefile.am: add hppa subdir - - * hppa/tramp.c: add initial implementation - this is 64 bit only - hppa/Makefile.am hppa/.cvsignore: added - -2003-10-13 Bernie Solomon - - * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation - for V9 (64 bit), cover more 32 bit cases as well. - -2003-09-03 Zoltan Varga - - * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). - -2003-08-21 Zoltan Varga - - * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). - -Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro - - * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us). - -2003-05-20 Dietmar Maurer - - * x86/x86-codegen.h (x86_set_reg): add an assertion - it does - not work for all registers. - -Sat Feb 1 10:59:31 CET 2003 Paolo Molaro - - * alpha/*: update from Laramie. - -Mon Jan 27 12:49:10 CET 2003 Paolo Molaro - - * alpha/*: start of the port to the alpha architecture by - Laramie Leavitt (). - -Tue Jan 21 17:29:53 CET 2003 Paolo Molaro - - * ppc/ppc-codegen.h: completed ppc native code generation by - Taylor Christopher P . - -Fri Jan 17 21:14:18 CET 2003 Paolo Molaro - - * ppc/tramp.c: adapted to work for MacOSX (from a patch by - John Duncan). - -2002-11-11 Mark Crichton - - * sparc/tramp.c: Added some disassembly bits for debugging. 
- -2002-10-02 Mark Crichton - - * sparc/tramp.c: More cleanup of the trampoline code. Still some - problems with it w.r.t. delegates. - -2002-09-25 Mark Crichton - - * sparc/tramp.c: Off by one error. Whoops. Trampoline code should - now work properly. - -2002-09-24 Mark Crichton - - * sparc/tramp.c: Works as well as the old code did. Cleanup is - finished. The framework now for adding more type support is in, - and should be a *lot* cleaner. - -2002-09-22 Mark Crichton - - * sparc/tramp.c: Completely broke trampolining on SPARC processors. - The code needed a nasty cleanup, so most of it is rewritten. - It will be fixed. - -2002-08-20 Mark Crichton - - * sparc/tramp.c (mono_create_trampoline): Now works on Sparc. Tested - on an Ultra 2 running Linux. - -Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro - - * x86/tramp.c: fix random memory read in mono_create_method_pointer. - -2002-08-05 Dietmar Maurer - - * x86/tramp.c (mono_create_trampoline): fixed stack_size bug - -2002-08-01 Dietmar Maurer - - * x86/tramp.c (mono_create_method_pointer): allocate space for - value types. - (mono_create_trampoline): also push the value type pointer for - methods returning value types. - (mono_create_method_pointer): support valuetype returns. - -2002-07-31 Dietmar Maurer - - * x86/tramp.c: remove code to handle PInvoke because this is no - longer needed. - (mono_create_method_pointer): return method->addr for pinvoke methods - -Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro - - * x86/tramp.c: fix float loads. Simple delegate marshaling fix. - -2002-07-08 Radek Doulik - - * ppc/tramp.c: marshaling for SZARRAY - -2002-07-05 Radek Doulik - - * ppc/tramp.c: removed magic hack - -Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro - - * x86/tramp.c: marshal simple arrays correctly. - -2002-05-27 Miguel de Icaza - - * x86/x86-codegen.h: Set the standard header format. - -2002-05-23 Dietmar Maurer - - * x86/tramp.c (mono_create_method_pointer): removed the magic - trick to store the function pointer in the prolog and use the same - mechanism as in the jit. - -2002-05-13 Radek Doulik - - * ppc/tramp.c (emit_save_parameters): fix I8 parameters - -2002-05-06 Sergey Chaban - - * x86/x86-codegen.h: added missing shifts; - 8-bit ALU operations (reg-reg); - macro for FPU ops with integer operand; - FIST macro (without pop); - -Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro - - * x86/x86-codegen.h: added loop instructions and made x86_patch fully - useful. - -2002-04-20 Dietmar Maurer - - * x86/tramp.c (mono_create_trampoline): support internalcall - String constructors - -Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro - - * x86/tramp.c: fix advancement of argument position on the stack. - -Sat Mar 16 19:12:57 CET 2002 Paolo Molaro - - * x86/tramp.c: increase default allocated size for trampolines - and assert on overflow. - -2002-03-14 Dietmar Maurer - - * x86/tramp.c (mono_create_trampoline): dont use fld/fst to copy - R8 values - -Mon Mar 11 16:14:29 CET 2002 Paolo Molaro - - * x86/x86-codegen.h: addex x86_clear_reg() and changed - x86_mov_reg_imm() to not check for imm == 0. - -Thu Feb 28 12:34:21 CET 2002 Paolo Molaro - - * x86/tramp.c: start handling of more complex marshaling stuff. - -2002-02-25 Sergey Chaban - - * arm: added ARM support code. - * Makefile.am: added arm to DIST_SUBDIRS. 
- -2002-02-24 Radek Doulik - - * ppc/tramp.c (mono_create_method_pointer): basic delegates - implementation, it works for simple delegates now and I am already - pretty close to have it working for every delegates, but I am - going to sleep and finish it tomorrow? - (mono_create_method_pointer): implements delegates with parameters - and return value - -2002-02-22 Jeffrey Stedfast - - * sparc/tramp.c (mono_create_trampoline): Much tinkering to get - the opcodes more correct. Still needs a lot of work. - -2002-02-19 Radek Doulik - - * ppc/tramp.c (emit_save_parameters): don't start saving 64bit values to - even registers - added stack saving for most arguments - -Tue Feb 19 20:19:38 CET 2002 Paolo Molaro - - * x86/tramp.c: avoid pointer arthmetric (pointed out by Serge). - -2002-02-17 Radek Doulik - - * ppc/tramp.c: fixed minimal stack size, fixed string parameters, - fix byte and half word parameters - (mono_string_new_wrapper): new helper function, cut&pasted from - x86, modified to check for NULL text to avoid branching in - generated code - (calculate_sizes): updated for string retval changes - (emit_call_and_store_retval): updated for string retval - - * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth - -2002-02-16 Radek Doulik - - * ppc/tramp.c (emit_call_and_store_retval): support U2, I2, CHAR - -Mon Feb 11 18:40:04 CET 2002 Paolo Molaro - - * sparc/*: sparc codegen header and some untested trampoline code. - -Mon Feb 11 12:32:35 CET 2002 Paolo Molaro - - * x86/tramp.c: fix handling of multiple marshaleed strings. - * x86/x86-codegen.h: some code to patch branch displacements. - -Fri Feb 1 16:03:00 CET 2002 Paolo Molaro - - * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get (). - -2002-01-23 Miguel de Icaza - - * x86/tramp.c (mono_create_trampoline): Do not try to create a - mono_string_new if the return value from the PInvoke code is - NULL. - -Mon Jan 14 11:50:16 CET 2002 Paolo Molaro - - * x86/x86-codegen.c: added overflow condition code and some aliases - for the other ccs. - -Thu Jan 10 21:01:08 CET 2002 Paolo Molaro - - * x86/tramp.c: mono_create_trampoline (): the runtime argument is - needed to handle correctly delegates, the previous change in handling - the string return type broke them. - -Tue Jan 8 22:38:41 MST 2002 Matt Kimball - - * x86/tramp.c: handle strings returned from functions in external - libraries by converting to a Mono string object after the pinvoke'd - function returns - -Sat Jan 5 15:51:06 CET 2002 Paolo Molaro - - * x86/tramp.c: handle short integer return types. - -Thu Dec 20 20:13:07 CET 2001 Paolo Molaro - - * x86/tramp.c: fix create_method_pointer() to pass the arguments - correctly and add check for overflow. - -Thu Dec 13 15:56:53 CET 2001 Paolo Molaro - - * x86/x86-codegen.h: x86_mov_memindex_imm() added. - -2001-11-29 Radek Doulik - - * ppc/tramp.c: use r12 which is volatile instead of non-volatile - r14 to avoid saving - - * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS - generate libmonoarch for ppc - -Tue Nov 27 15:24:07 CET 2001 Paolo Molaro - - * x96/x86-codegen.c: x86_lea_memindex() added. - -Thu Nov 15 17:41:01 CET 2001 Paolo Molaro - - * x86/tramp.c: handle enums with underlying type different from int32. - -Wed Nov 14 19:21:26 CET 2001 Paolo Molaro - - * x86/tramp.c: handle boolean as a return value. - * x96/x86-codegen.c: x86_widen_memindex() added. - -2001-11-07 Miguel de Icaza - - * x86/tramp.c: Include stdlib to kill warning. 
- -Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro - - * x86/tramp.c: handle boolean, u1 and i1 as return values. - -Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro - - * x86/x86-codegen.c: added x86_set_{reg,mem,membase}. - -Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro - - * x86/tramp.c: remove mono_get_ansi_string () and use - mono_string_to_utf8 () instead. - -Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro - - * x86/tramp.c: allow marshalling valuetypes if they are - 4 bytes long. - -Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro - - * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment - and avoid a couple of unnecessary instructions. - -Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro - - * x86/tramp.c: marshal valuetypes that are enums. - -Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG - as basereg. - -Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: added memindex addressing mode encoding - (and mov to/from register opcodes). - -Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro - - * x86/tramp.c: don't change a MONO_TYPE_STRING to a char* - when it's an argument to an internalcall. - -Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro - - * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines. - -2001-09-21 Dietmar Maurer - - * x86/x86-codegen.h (x86_breakpoint): added. - -Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: remove C++ comments. - -2001-09-17 Dietmar Maurer - - * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest - -Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro - - * Makefile.am, x86/Makefile.am: conditional compile logic - to make porting to different targets easier. - -Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: fixes and x86_mov_membase_imm (). - * x86/tramp.c: implemented mono_create_method_pointer (): - creates a native pointer to a method implementation that can be - used as a normal C callback. - -Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: added x86_rdtsc() and fixes. - * x86/tramp.c: create trampolines to call pinvoke methods. - * x86/Makefile.am: create a libmonoarch convenience library. - -Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added. - -Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit(). - -Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro - - * x86/x86-codegen.h, x86/test.c: added x86 code emitter with - test. +commit a7074ea55af096913e4bcc8e044be7601bcc55b5 +Author: Zoltan Varga +Date: Thu Jan 6 11:49:32 2011 +0100 + Fix warnings introduced by the NACL merge. + +commit 4edb45273377cc0858dab7e12b19026467e796c5 +Author: Elijah Taylor +Date: Tue Dec 14 16:03:45 2010 -0800 + + Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client + +commit cfdf246cd2ffd65bd25e09f1d66bb55d57bf8953 +Author: Elijah Taylor +Date: Tue Dec 14 14:37:36 2010 -0800 + + Changes to mono/arch/amd64 for Native Client + +commit aa974c33a3cee416fc456053164835acbf81df70 +Author: Rodrigo Kumpera +Date: Fri Sep 24 11:28:46 2010 -0300 + + Implement amd64 support for OP_CARDTABLE. + + * amd64-codegen.h (amd64_alu_reg_membase_size): Add support + for RIP based addressing. + + * cpu-amd64.md: Add card_table_wbarrier. + + * mini-amd64.c (mono_arch_output_basic_block): Emit the + new OP. + + * mini-amd64.c (mono_arch_emit_exceptions): Handle another + kind of patch-info - GC_CARD_TABLE_ADDR. 
This is required
+    because we can have neither 64-bit immediates on amd64 nor
+    2 scratch registers with the current regalloc.
+
+    * mini-amd64.h: Define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER.
+
+commit 7981b77489eba9fafe98b764ae8c423143e55a25
+Author: Mark Mason
+Date:   Wed Aug 18 23:39:36 2010 +0800
+
+    Simplify test for MIPS imm16 operands.
+
+    Code contributed under the MIT/X11 license.
+
+commit 881a8fe8dfebf42e0f50228319132001d121c983
+Author: Elijah Taylor
+Date:   Mon Aug 9 17:40:18 2010 +0200
+
+    Add hooks to the codegen macros to support NACL codegen.
+
+commit da52cebbb28392e8043a36e8c29f4ceb4f706741
+Author: Raja R Harinath
+Date:   Sun Jul 25 20:09:25 2010 +0530
+
+    EOL handling
+
+    This set of .gitattributes was automatically generated from the list of files
+    that GIT tried to normalize when I enabled automatic EOL conversion.
+
+    With this set of attributes, we prevent automated EOL conversion on files that
+    we know will cause trouble down the road.
+
+commit 80806328ee52ed52783e005f044e8447d34efac5
+Author: Zoltan Varga
+Date:   Wed May 19 02:35:46 2010 +0000
+
+    2010-05-19  Zoltan Varga
+
+    * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support.
+
+    svn path=/trunk/mono/; revision=157521
+
+commit bb66b04f8ca017660ae65afa4b86a33b32d48cdb
+Author: Zoltan Varga
+Date:   Thu Apr 8 04:41:44 2010 +0000
+
+    .gitignore
+
+    svn path=/trunk/mono/; revision=155025
+
+commit 2b562993a3dced62eb48aeedcf38f234b655e86f
+Author: Zoltan Varga
+Date:   Mon Mar 29 23:21:23 2010 +0000
+
+    2010-03-30  Zoltan Varga
+
+    * arm/*.sh: Remove bash dependency.
+
+    svn path=/trunk/mono/; revision=154407
+
+commit 977db7f5b92aa4e7b8909f6d2440f3347e548364
+Author: Neale Ferguson
+Date:   Tue Mar 23 20:00:46 2010 +0000
+
+    Primarily, add support for mono_arch_get_throw_corlib_exception and IMT
+    for s390x. Other s390x fixes to instruction sizes, parameter passing, and
+    ARCH settings.
+
+    svn path=/trunk/mono/; revision=154085
+
+commit 282ce11cd7691698334563b95ca4b49e6c32f900
+Author: Gonzalo Paniagua Javier
+Date:   Fri Nov 20 22:34:30 2009 +0000
+
+    removing PLATFORM_WIN32
+
+    svn path=/trunk/mono/; revision=146652
+
+commit 774d55350115d1c4f08dc2a9b015e9502d796cef
+Author: Zoltan Varga
+Date:   Tue Nov 10 00:58:49 2009 +0000
+
+    2009-11-10  Zoltan Varga
+
+    * arm/arm-codegen.h: Fix the names of the LDMIA/STMIA macros, they don't
+    actually update the base register.
+
+    svn path=/trunk/mono/; revision=145786
+
+commit 568b4a7ab726e87c664a682193fa57c5521ed23c
+Author: Zoltan Varga
+Date:   Fri Aug 14 13:49:01 2009 +0000
+
+    2009-08-14  Zoltan Varga
+
+    * arm/arm-codegen.h: Add armv6 MOVW/MOVT.
+
+    svn path=/trunk/mono/; revision=139918
+
+commit c4d98f3131b6b7d0732050c2e0ac7bd05b6c27c2
+Author: Jerri Maine
+Date:   Tue Aug 4 00:31:14 2009 +0000
+
+    Contributed under the terms of the MIT/X11 license by
+    Jerry Maine.
+
+    * mono/arch/amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
+    * mono/mini/basic-simd.cs: added test for packed double square root.
+    * mono/mini/cpu-amd64.md: added opcode info for packed double square root.
+    * mono/mini/cpu-x86.md: added opcode info for packed double square root.
+    * mono/mini/mini-ops.h: added IR opcode for packed double square root.
+    * mono/mini/mini-x86.c: added IR to native translation code for packed double square root.
+    * mono/mini/mini-amd64.c: removed todo for packed double square root.
+    * mono/mini/simd-intrinsics.c: added method to IR opcode conversion for
+    packed double square root.
+
+    svn path=/trunk/mono/; revision=139309
+
+commit fc5d2d293fe800d860e9af4fcd9b19f9be7d4e17
+Author: Paolo Molaro
+Date:   Fri Jul 24 15:00:25 2009 +0000
+
+    Fri Jul 24 16:54:13 CEST 2009 Steven Munroe
+
+    This patch is contributed under the terms of the MIT/X11 license
+
+    * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
+    conversion to support combining addis for bits 32-47 with
+    signed load/store displacements for bits 48-63.
+    (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
+    These instructions are available to 32-bit programs on 64-bit
+    hardware, and on 32-bit hardware starting with PowerISA V2.01.
+    [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
+    native mode.
+    [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
+    ppc32.
+
+    svn path=/trunk/mono/; revision=138635
+
+commit f44bc9e40cc840bf63bf782aa0338aae3e898f7f
+Author: Zoltan Varga
+Date:   Mon Jul 20 20:45:49 2009 +0000
+
+    2009-07-20  Zoltan Varga
+
+    * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
+    of this instruction.
+
+    svn path=/trunk/mono/; revision=138242
+
+commit 88ccf5c589b23d6e79ea5a588d3986693b09879a
+Author: Zoltan Varga
+Date:   Mon Jul 13 21:58:58 2009 +0000
+
+    2009-07-13  Zoltan Varga
+
+    * x86/x86-codegen.h: Applied patch from Marian Salaj.
+    Fix encoding of PMINSW and PMINSD. Fixes #521662.
+
+    svn path=/trunk/mono/; revision=137821
+
+commit 64d366eddf3b1c93bcaaff2190fa1cc2b01f7f03
+Author: Jerri Maine
+Date:   Fri Jul 10 22:35:07 2009 +0000
+
+    Contributed under the terms of the MIT/X11 license by
+    Jerry Maine.
+
+    * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+    * amd64/amd64-codegen.h: Fix bugs in simd macros.
+
+    svn path=/trunk/mono/; revision=137736
+
+commit d7fa5cedae9e4859b340ee29e997dfd48b45ce6e
+Author: Jerri Maine
+Date:   Wed Jun 24 21:25:11 2009 +0000
+
+    Fix wrong date in my entry to ChangeLog files. Sorry! :((
+
+    svn path=/trunk/mono/; revision=136786
+
+commit 1c634ebda21ddf5392c9d8edd030323d1ad85962
+Author: Jerri Maine
+Date:   Wed Jun 24 21:19:29 2009 +0000
+
+    mini-amd64.c: Added code to convert simd IR to native amd64 sse.
+    amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+
+    svn path=/trunk/mono/; revision=136785
+
+commit bb994071dcc42ba150d88776fe70f8d35fc522a9
+Author: Neale Ferguson
+Date:   Tue Jun 23 23:55:26 2009 +0000
+
+    Fix LCONV_TO_xx and ICONV_TO_xx. Fix leave_method dump of returned
+    structure. Fix formatting.
+    Correct instruction lengths.
+    Add new instructions.
+
+    svn path=/trunk/mono/; revision=136748
+
+commit f48a4f5a13745caf5350d6f190efb97ec6b605ef
+Author: Zoltan Varga
+Date:   Mon Jun 22 15:25:02 2009 +0000
+
+    Fix a few uses of ppc_store_reg.
+
+    svn path=/trunk/mono/; revision=136607
+
+commit 4ecc9d712b82d78c853e574edc0345c85bfcd660
+Author: Zoltan Varga
+Date:   Mon Jun 22 15:24:56 2009 +0000
+
+    Fix a few uses of ppc_load_reg/ppc_store_reg.
+
+    svn path=/trunk/mono/; revision=136606
+
+commit 40c668ecb1553ffb7b6575b439b3ff8420265cd8
+Author: Zoltan Varga
+Date:   Mon Jun 22 15:22:10 2009 +0000
+
+    2009-06-22  Zoltan Varga
+
+    * ppc/ppc-codegen.h: Rework the naming of the load/store macros,
+    ldr/str now handle register sized quantities, while ldptr/stptr handle
+    pointer sized quantities.
+
+    svn path=/trunk/mono/; revision=136604
+
+commit cf0e113f7dd91ff8b46e35047cc48c2e5ece925c
+Author: Zoltan Varga
+Date:   Sat Jun 20 18:47:03 2009 +0000
+
+    2009-06-20  Zoltan Varga
+
+    * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside
+    macros.
+
+    svn path=/trunk/mono/; revision=136548
+
+commit 3858973d0bd980206ea3725a2e74f2a336aa1aa1
+Author: Zoltan Varga
+Date:   Sat Jun 20 13:04:42 2009 +0000
+
+    2009-06-20  Zoltan Varga
+
+    * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities.
+    Handle little endian host platforms in ppc_emit32.
+
+    svn path=/trunk/mono/; revision=136539
+
+commit 9629536810d07a63b980a29912eaf3df7313fee9
+Author: Jerri Maine
+Date:   Fri Jun 12 17:33:11 2009 +0000
+
+    Add macros for coding two-byte SIMD/SSE opcodes. Added comments to help
+    tell the different types of SSE code gen macros apart.
+
+    svn path=/trunk/mono/; revision=136018
+
+commit 76cddabf0319c7be9fae2b6c532aafe6587fafbc
+Author: Miguel de Icaza
+Date:   Wed Apr 22 23:59:10 2009 +0000
+
+    merge
+
+    svn path=/trunk/mono/; revision=132427
+
+commit 965b554666f2999b9e01dd731b1134af1cfcd5fa
+Author: Zoltan Varga
+Date:   Mon Apr 6 15:09:57 2009 +0000
+
+    2009-04-06  Zoltan Varga
+
+    * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD.
+
+    svn path=/trunk/mono/; revision=131125
+
+commit 7b7235494cabe7c5a796fafd6297070f993b03a8
+Author: Zoltan Varga
+Date:   Thu Apr 2 22:37:35 2009 +0000
+
+    2009-04-03  Zoltan Varga
+
+    * amd64/amd64-codegen.h: Add macros for decoding the SIB byte.
+
+    svn path=/trunk/mono/; revision=130910
+
+commit 9f497af70ef5ed9244ffbe9a6263f7d077136148
+Author: Zoltan Varga
+Date:   Thu Apr 2 00:50:47 2009 +0000
+
+    2009-04-02  Zoltan Varga
+
+    * arm/arm-vfp-codegen.h: Add missing VFP codegen macros.
+
+    svn path=/trunk/mono/; revision=130817
+
+commit 7c682141c5861685e5b0efdcc1f337083657cf9d
+Author: Zoltan Varga
+Date:   Fri Mar 6 15:55:12 2009 +0000
+
+    2009-03-06  Zoltan Varga
+
+    * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing
+    a file in the middle of a function.
+
+    svn path=/trunk/mono/; revision=128782
+
+commit a7f6dd7620d7c440216c0f156bcd969a28a592d4
+Author: Martin Baulig
+Date:   Sat Feb 28 14:36:50 2009 +0000
+
+    Create .gitignore's.
+
+    svn path=/trunk/mono/; revision=128265
+
+commit 22e6e9728faa11a87a7f6f0f0ff0f0f8ef754c03
+Author: Zoltan Varga
+Date:   Fri Feb 27 06:21:52 2009 +0000
+
+    2009-02-27  Zoltan Varga
+
+    * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are
+    autogenerated.
+
+    svn path=/trunk/mono/; revision=128179
+
+commit c70f15fc12afeb73f19d4ff18cf11b7289d76c4f
+Author: Mark Probst
+Date:   Mon Feb 2 23:32:58 2009 +0000
+
+    2009-02-02  Mark Probst
+
+    Contributed under the terms of the MIT/X11 license by Steven
+    Munroe.
+
+    * ppc/ppc-codegen.h: Make operand order and case consistent
+    (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs,
+    ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz,
+    ppc_stw, ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu,
+    ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw,
+    ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use
+    "i" or "ui" instead of "d" for immediate operands to immediate
+    arithmetic and logical instructions in macros ppc_addi, ppc_addis,
+    ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd.
+    [__mono_ppc64__]: Make operand order and case consistent
+    (assembler order) for ppc_load_multiple_regs,
+    ppc_store_multiple_regs. Simplify the DS form and make them
+    consistent with D forms for ppc_load_reg, ppc_load_reg_update,
+    ppc_store_reg, ppc_store_reg_update, ppc_ld, ppc_lwa, ppc_ldu,
+    ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux.
+
+    2009-02-02  Mark Probst
+
+    Contributed under the terms of the MIT/X11 license by Steven
+    Munroe.
+ + * exceptions-ppc.c (restore_regs_from_context): Correct operand + order (offset then base reg) for ppc_load_multiple_regs. + (emit_save_saved_regs) Correct operand order for + ppc_store_multiple_regs. + (mono_arch_get_call_filter): Correct operand order for + ppc_load_multiple_regs. + + * mini-ppc.c (emit_memcpy): Fix operand order for + ppc_load_reg_update and ppc_store_reg_update. + (mono_arch_output_basic_block): Correct operand order for ppc_lha. + (mono_arch_emit_epilog): Correct operand order for + ppc_load_multiple_regs. + + * tramp-ppc.c (mono_arch_create_trampoline_code): Correct operand + order for ppc_store_multiple_regs and ppc_load_multiple_regs. + + svn path=/trunk/mono/; revision=125443 + +commit f228d47d2afc549321cec800466e6bc1cde631bb +Author: Rodrigo Kumpera +Date: Mon Jan 19 19:47:54 2009 +0000 + + 2009-01-19 Rodrigo Kumpera + + * x86/x86-codegen.h: Add x86_movd_xreg_membase. + + svn path=/trunk/mono/; revision=123825 + +commit 792160756d6ef76711408f151838c3f5a5f8d83b +Author: Mark Probst +Date: Fri Dec 19 19:46:04 2008 +0000 + + 2008-12-19 Mark Probst + + * ppc/ppc-codegen.h: Fixed the argument order for lwzu in + ppc_load_reg_update. + + svn path=/trunk/mono/; revision=121883 + +commit 344a06253c9c1bad287e160b9714b0a052e68a09 +Author: Mark Mason +Date: Sat Dec 13 06:54:25 2008 +0000 + + 2008-12-12 Mark Mason + + * mips/mips-codegen.h: Changes to support n32. + + Contributed under the MIT X11 license. + + svn path=/trunk/mono/; revision=121488 + +commit 2dcc1868b2e2e830a9fa84a445ee79a8f6ab38ba +Author: Mark Probst +Date: Wed Dec 10 09:33:57 2008 +0000 + + 2008-12-10 Mark Probst + + * mini-ppc.c: Merged with mini-ppc64.c. + + * mini-ppc.h: Define PPC_MINIMAL_PARAM_AREA_SIZE on all targets. + + * Makefile.am: Use the same sources for PPC and PPC64. + + * mini-ppc64.c: Removed. + + 2008-12-10 Mark Probst + + * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64 + merge. + + svn path=/trunk/mono/; revision=121203 + +commit 77eff8936b5e423be2712ba66cd8baba0edd2795 +Author: Mark Probst +Date: Fri Dec 5 20:57:02 2008 +0000 + + 2008-12-05 Mark Probst + + * mini-ppc.c: Some simple merges from mini-ppc64.c. + + 2008-12-05 Mark Probst + + * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence. + Added ppc_compare_log. + + svn path=/trunk/mono/; revision=120890 + +commit dd397c9fd311f0411694ff1cc7904aec14f4551b +Author: Mark Probst +Date: Fri Dec 5 16:42:24 2008 +0000 + + 2008-12-05 Mark Probst + + * tramp-ppc.c, mini-ppc.c, mini-ppc.h: Merged tramp-ppc.c with + tramp-ppc64.c. + + * Makefile.am: Use tramp-ppc.c instead of tramp-ppc64.c. + + * tramp-ppc64.c: Removed. + + 2008-12-05 Mark Probst + + * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added + ppc_load/store_multiple_regs and ppc_compare_reg_imm. + + svn path=/trunk/mono/; revision=120852 + +commit 7f226f68fb98684dafd132d90ca1a24635c33557 +Author: Mark Probst +Date: Tue Dec 2 16:03:45 2008 +0000 + + 2008-12-02 Mark Probst + + * tramp-ppc64.c (mono_arch_create_rgctx_lazy_fetch_trampoline): + Fix trampoline size. + + * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: A few floating point + conversion opcodes are implemented natively instead via emulation. + + 2008-12-02 Mark Probst + + * ppc/ppc-codegen.h: Opcodes for floating point conversions from + 64 bit integers. + + Code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. 
+ + svn path=/trunk/mono/; revision=120492 + +commit 742361c7bfc21faf8485d20d00cdfc58c04800f9 +Author: Mark Probst +Date: Fri Nov 28 19:06:34 2008 +0000 + + 2008-11-28 Mark Probst + + * mini-ppc64.c, mini-ppc64.h: Enable generalized IMT thunks and + make them work. + + 2008-11-28 Mark Probst + + * object.c: Don't put function descriptors into generalized IMT + thunks. + + 2008-11-28 Mark Probst + + * ppc/ppc-codegen.h: #define for the maximum length of a load + sequence. + + svn path=/trunk/mono/; revision=120248 + +commit b45b096d6d4246f16d05e42838122f1d58f875f6 +Author: Mark Probst +Date: Fri Nov 21 00:21:53 2008 +0000 + + 2008-11-21 Mark Probst + + * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: Several fixes. Now + PPC64 passes basic-long.exe. + + 2008-11-21 Mark Probst + + * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit + values. + + svn path=/trunk/mono/; revision=119560 + +commit dc227de13e4f1cee33c379401adbb90a225e680a +Author: Rodrigo Kumpera +Date: Thu Nov 20 23:45:00 2008 +0000 + + 2008-11-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD. + + svn path=/trunk/mono/; revision=119549 + +commit 01e12b57e8773f9c65c64a91f956b0fa9335d095 +Author: Rodrigo Kumpera +Date: Thu Nov 20 23:44:44 2008 +0000 + + 2008-11-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants. + + * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm. + + * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently. + + svn path=/trunk/mono/; revision=119545 + +commit 96ed3f7c4ea51c61ec3b5d0600c32fa003b8e4f7 +Author: Mark Probst +Date: Thu Nov 20 21:36:13 2008 +0000 + + 2008-11-20 Mark Probst + + * decompose.c: Decompose carry and overflow add on PPC64 like on + other 64 bit archs. Don't decompose sub at all on PPC64. + + * mini-ppc64.c, exceptions-ppc64.c, tramp-ppc64.c, cpu-ppc64.md: + Several fixes and new opcodes. Now PPC64 runs (but doesn't pass) + basic-long.exe. + + 2008-11-20 Mark Probst + + * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in + ppc_load_func to fix the 2 bit shift. + + svn path=/trunk/mono/; revision=119516 + +commit 14651d4fa6b039131000aa5157ed99b7526f89b8 +Author: Mark Probst +Date: Thu Nov 20 21:27:36 2008 +0000 + + 2008-11-20 Mark Probst + + * ppc/ppc-codegen.h: 64 bit division opcodes. + + Code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. + + svn path=/trunk/mono/; revision=119515 + +commit daa4af175e0f8b95888918dbf429c7d5f66d3c07 +Author: Zoltan Varga +Date: Thu Nov 20 14:28:51 2008 +0000 + + 2008-11-20 Zoltan Varga + + * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only + used by the interpreter. + + svn path=/trunk/mono/; revision=119444 + +commit 3225dc9308230de9fbbca884c05e6b150a8e0333 +Author: Rodrigo Kumpera +Date: Thu Nov 20 14:12:04 2008 +0000 + + 2008-11-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add PEXTR B/W/D. + + svn path=/trunk/mono/; revision=119441 + +commit 5c317c4676f911a0620b54e6668cf66a5c0dda31 +Author: Rodrigo Kumpera +Date: Tue Nov 18 21:56:58 2008 +0000 + + 2008-11-18 Rodrigo Kumpera + + * x86/x86-codegen.h: Add PINSR B/W/D. + + svn path=/trunk/mono/; revision=119229 + +commit b31b375fc1354cc835d183e7e251e602eeb038c5 +Author: Rodrigo Kumpera +Date: Tue Nov 18 21:56:49 2008 +0000 + + 2008-11-17 Rodrigo Kumpera + + * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ. + + * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg + macro. 
+ + svn path=/trunk/mono/; revision=119227 + +commit dbebfad82832bf895561902dd527d2e4c158c2c9 +Author: Mark Probst +Date: Tue Nov 18 15:32:41 2008 +0000 + + 2008-11-18 Mark Probst + + * ppc/ppc-codegen.h: Macro for nop added. + + 2008-11-18 Mark Probst + + * mini-ppc64.c, mini-ppc64.h, tramp-ppc64.c, cpu-ppc64.md: Changes + for PPC64. An empty program runs now. + + svn path=/trunk/mono/; revision=119162 + +commit 406790f1df77c80b5b28bcac561e7b6c6cd1a3a6 +Author: Mark Probst +Date: Tue Nov 18 10:25:11 2008 +0000 + + 2008-11-18 Mark Probst + + * ppc/ppc-codegen.h: PPC64 code generation macros. + + Based on code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. + + svn path=/trunk/mono/; revision=119141 + +commit 484dbedc8136e413a77ee11938d40e713cfefcfd +Author: Mark Probst +Date: Tue Nov 18 10:17:36 2008 +0000 + + 2008-11-18 Mark Probst + + * ppc/ppc-codegen.h: A few fixes and additions. + + Based on code submitted by andreas.faerber@web.de at + https://bugzilla.novell.com/show_bug.cgi?id=324134 under the + X11/MIT license. + + svn path=/trunk/mono/; revision=119140 + +commit 74b70bd5f7bc3b40a919c6c8b06c22facae8df6b +Author: Rodrigo Kumpera +Date: Mon Nov 17 17:00:22 2008 +0000 + + 2008-11-17 Rodrigo Kumpera + + * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant + and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros. + + svn path=/trunk/mono/; revision=119057 + +commit 59483983e37bb55af19f4e98e3de2f1ad216989b +Author: Andreas Färber +Date: Sat Nov 15 10:59:47 2008 +0000 + + 2008-11-15 Andreas Faerber + + * ppc/test.c: Add support for Mac OS X. + + This commit is licensed under the MIT X11 license. + + svn path=/trunk/mono/; revision=118924 + +commit 6c930cb35aa08e10abba989d9cb8560b4636ba73 +Author: Rodrigo Kumpera +Date: Thu Nov 13 22:51:27 2008 +0000 + + 2008-11-13 Rodrigo Kumpera + + * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg. + + svn path=/trunk/mono/; revision=118779 + +commit bfe79f71f1352fbbfb696de3b0c093562b6fefb5 +Author: Rodrigo Kumpera +Date: Tue Nov 4 20:17:31 2008 +0000 + + 2008-11-04 Rodrigo Kumpera + + * x86/x86-codegen.h: Add store nta. + + svn path=/trunk/mono/; revision=117921 + +commit 42f47d048391da1619aa26b70e54980c4c33e3f2 +Author: Rodrigo Kumpera +Date: Mon Nov 3 14:41:44 2008 +0000 + + 2008-11-03 Rodrigo Kumpera + + * x86/x86-codegen.h: Add prefetch instruction + and x86_sse_alu_reg_membase macro. + + svn path=/trunk/mono/; revision=117753 + +commit eaf2804839ffb61912a8eeef7c3a58463aafcdd6 +Author: Rodrigo Kumpera +Date: Tue Oct 28 19:24:34 2008 +0000 + + 2008-10-28 Rodrigo Kumpera + + * x86/x86-codegen.h: Add long version of the packed integer + ops. + + svn path=/trunk/mono/; revision=117292 + +commit 3fffcb4ac5879f2655ee3b4b3bee093a9eaa5016 +Author: Rodrigo Kumpera +Date: Tue Oct 28 00:05:56 2008 +0000 + + 2008-10-27 Rodrigo Kumpera + + * x86/x86-codegen.h: Add movddup. + + svn path=/trunk/mono/; revision=117220 + +commit bf9bec59fad96b9a7cb38921c26bb1c176fe40ce +Author: Rodrigo Kumpera +Date: Fri Oct 24 21:58:17 2008 +0000 + + 2008-10-24 Rodrigo Kumpera + + * x86/x86-codegen.h: Add signed pack with saturation. + + svn path=/trunk/mono/; revision=116995 + +commit 2ffed07a8205616ea4a1605338f08c8ad6c77432 +Author: Rodrigo Kumpera +Date: Fri Oct 24 13:36:53 2008 +0000 + + 2008-10-24 Rodrigo Kumpera + + * x86/x86-codegen.h: Add signed packed mul high. 
+ + svn path=/trunk/mono/; revision=116936 + +commit 2b6070d8bbd583f6bb90e02f3961252ef0854da8 +Author: Gonzalo Paniagua Javier +Date: Fri Oct 24 01:02:49 2008 +0000 + + remove temporary/generated files + + svn path=/trunk/mono/; revision=116902 + +commit 7a2889c2ce0cfbc193324b64764a02e42f5daee8 +Author: Rodrigo Kumpera +Date: Fri Oct 24 00:35:54 2008 +0000 + + 2008-10-23 Rodrigo Kumpera + + * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation + and compare greater. + + svn path=/trunk/mono/; revision=116896 + +commit 600a42f70b41a94712aac746e44f2bba885dfc1f +Author: Rodrigo Kumpera +Date: Mon Oct 20 19:36:04 2008 +0000 + + 2008-10-20 Rodrigo Kumpera + + * x86/x86-codegen.h: Add multiply and store high. + + svn path=/trunk/mono/; revision=116545 + +commit 454b5617264c1bb64ff7296669db98a14cc58118 +Author: Rodrigo Kumpera +Date: Fri Oct 17 17:41:14 2008 +0000 + + 2008-10-17 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int shuffle. + + svn path=/trunk/mono/; revision=116265 + +commit 8336fe34234402529da0e46af634948d678ee649 +Author: Rodrigo Kumpera +Date: Thu Oct 16 23:22:27 2008 +0000 + + 2008-10-16 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int compare equals and + psabw. + + svn path=/trunk/mono/; revision=116117 + +commit 0a6e6df8d766d7ad1b21d6c234826293d1317979 +Author: Rodrigo Kumpera +Date: Wed Oct 15 20:52:54 2008 +0000 + + 2008-10-15 Rodrigo Kumpera + + * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask. + + svn path=/trunk/mono/; revision=115919 + +commit ec2240eaee83b7c5ff444e0708a114458394d55b +Author: Rodrigo Kumpera +Date: Tue Oct 14 15:02:05 2008 +0000 + + 2008-10-14 Rodrigo Kumpera + + * x86/x86-codegen.h: Add movsldup and movshdup. + + svn path=/trunk/mono/; revision=115785 + +commit 7ed9633867d31f5dd5fd971611f952574c005a87 +Author: Rodrigo Kumpera +Date: Mon Oct 13 22:13:15 2008 +0000 + + 2008-10-13 Rodrigo Kumpera + + * x86/x86-codegen.h: Add remaining FP sse1 ops. + Add sse ps encoding with imm operand. + Add remaining sse1 ops. + + svn path=/trunk/mono/; revision=115699 + +commit 18f1e82ca6ebaf0929f654a56ab9ddfadfacacb5 +Author: Rodrigo Kumpera +Date: Mon Oct 13 01:13:10 2008 +0000 + + 2008-10-12 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macro for sse41 ops. + Add defined for pack ops, dword shifts/mul/pack. + + 2008-10-12 Rodrigo Kumpera + + * basic-simd.cs: Remove PackWithUnsignedSaturation tests as it turns out + that the packuswb/packusdw don't work with unsigned numbers for what + would be negative numbers in signed format. + + * cpu-x86.md: Add doubleword forms of many ops and packing ones. + Fix the len of fconv_to_r8_x and xconv_r8_to_i4. + + * mini-ops.h: Add doubleword forms of many ops and packing ones. + + * mini-x86.c: Emit doubleword forms of many ops and packing ones. + + * simd-intrinsics.c (SimdIntrinsc): Rename the flags field to simd_version. + + * simd-intrinsics.c (vector4f_intrinsics): Use simd_version field for sse3 ops. + + * simd-intrinsics.c (vector4u_intrinsics): Rename to vector4ui_intrinsics and + add more ops. + + * simd-intrinsics.c (simd_version_name): New function, returns the name of the + version as the enum in mini.h. + + * simd-intrinsics.c (emit_intrinsics): Instead of having a special emit mode + for sse3 ops, check the simd_version field if present. This way the code + works with all versions of sse. 
+ + svn path=/trunk/mono/; revision=115610 + +commit 494ea4f86907f393c8f0ba660edb100a107a8c80 +Author: Zoltan Varga +Date: Sat Oct 11 05:26:06 2008 +0000 + + 2008-10-11 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support. + + svn path=/trunk/mono/; revision=115509 + +commit ba0739c0dc1dd6713f6127160dcee501b105c300 +Author: Zoltan Varga +Date: Fri Oct 10 21:55:37 2008 +0000 + + 2008-10-10 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets. + + svn path=/trunk/mono/; revision=115494 + +commit 5de452f7ff84e26bd22b86205a1cdb9fc207fe75 +Author: Rodrigo Kumpera +Date: Thu Oct 9 18:28:16 2008 +0000 + + 2008-10-09 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macros for sse shift, pack, unpack, + saturated math and packed byte/word/dword math. + + svn path=/trunk/mono/; revision=115367 + +commit 922c5a03dc6cd66147b1c6bfeb8c1045176618da +Author: Rodrigo Kumpera +Date: Fri Oct 3 14:28:09 2008 +0000 + + 2008-10-03 Rodrigo Kumpera + + * x86/x86-codegen.h: Add macros and enum for SSE instructions. + + svn path=/trunk/mono/; revision=114751 + +commit f2d756dab8d08c009df41d94eb21fdf427a8e01a +Author: Mark Probst +Date: Sat Sep 27 13:02:48 2008 +0000 + + 2008-09-27 Mark Probst + + * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings. + + 2008-09-27 Mark Probst + + * mini-ppc.c: Compiler warning fixes. + + svn path=/trunk/mono/; revision=114279 + +commit 386d8b482a7e399e4e8d130dd0d2d2ab405068ae +Author: Mark Probst +Date: Sun Sep 7 10:25:11 2008 +0000 + + 2008-09-07 Mark Probst + + * marshal.c (mono_type_native_stack_size): Treat + MONO_TYPE_TYPEDBYREF like MONO_TYPE_VALUETYPE. + + 2008-09-07 Mark Probst + + * method-to-ir.c (mono_method_to_ir2): Disable tail calls for PPC + until they're implemented properly. + + * exceptions-ppc.c: Use arch-independent exception-handling code + instead of custom one. + + * exceptions-ppc.c, mini-ppc.c, mini-ppc.h: Bug fixes and changes + for Linear IR. + + * tramp-ppc.c, mini-ppc.c: Fixed warnings. + + * decompose.c, aot-runtime.c, aot-compiler.c: PPC code also + applies when __powerpc__ is defined. + + 2008-09-07 Mark Probst + + * libtest.c: Darwin structure alignment also applies to PPC. + + 2008-09-07 Mark Probst + + * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some + warnings. + + svn path=/trunk/mono/; revision=112455 + +commit 5c8178c1e6cf4d2370c865c6bc66995ca1174eb9 +Author: Mark Probst +Date: Mon Jun 16 09:37:01 2008 +0000 + + 2008-06-16 Mark Probst + + * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro + nobody uses. + + svn path=/trunk/mono/; revision=105886 + +commit ecbcbb317678440e62a13e16820f95f6ea2dff3d +Author: Zoltan Varga +Date: Fri Jun 6 02:08:56 2008 +0000 + + 2008-06-06 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the + instructions recommended by the amd64 manual. + + svn path=/trunk/mono/; revision=105134 + +commit 0ded1416da01e39a6c4a33fc9798123d4021fe4d +Author: Zoltan Varga +Date: Sat Apr 19 14:18:56 2008 +0000 + + 2008-04-19 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of + win64. 
+ + svn path=/trunk/mono/; revision=101210 + +commit cb1954322f73b8d1b0a6836c5242b05538ed72dd +Author: Jb Evain +Date: Sun Apr 13 11:44:22 2008 +0000 + + last merge 100420:100549 + + svn path=/branches/jb/ml2/mono/; revision=100550 + +commit a977d5e7585e338491944fc87b5e018891eedd93 +Author: Geoff Norton +Date: Wed Mar 12 17:08:32 2008 +0000 + + In .: + 2008-03-13 Geoff Norton + + * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX + + + svn path=/trunk/mono/; revision=98063 + +commit 8c6ca9f3fda169feccab289ecd181e06bcc8e133 +Author: Zoltan Varga +Date: Mon Feb 18 18:25:24 2008 +0000 + + 2008-02-18 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro. + + svn path=/trunk/mono/; revision=96092 + +commit 7a7cef000b9d59672b47c0fcdf75bd1fc00b8c78 +Author: Zoltan Varga +Date: Thu Feb 14 14:21:56 2008 +0000 + + 2008-02-14 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro. + + svn path=/trunk/mono/; revision=95633 + +commit 9cbc23b5ee9e4f2dca88f8418d11be97079c25a1 +Author: Zoltan Varga +Date: Fri Feb 8 14:28:06 2008 +0000 + + 2008-02-08 Zoltan Varga + + * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes + so they are consistent. + + svn path=/trunk/mono/; revision=95254 + +commit b951542a9ead8a408c6560a0ffad28a5ade9670d +Author: Zoltan Varga +Date: Thu Jan 24 20:12:46 2008 +0000 + + 2008-01-24 Zoltan Varga + + * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true. + + svn path=/trunk/mono/; revision=93834 + +commit 95aa5dc93dbfbcf10125032ecde0e5eabc969a98 +Author: Zoltan Varga +Date: Thu Jan 24 20:10:14 2008 +0000 + + 2008-01-24 Zoltan Varga + + * Makefile.am (SUBDIRS): Only set this on arm. + + svn path=/trunk/mono/; revision=93833 + +commit 11c84542edf07ed41b831c12058f9a0bdd83df93 +Author: Zoltan Varga +Date: Tue Nov 20 17:45:36 2007 +0000 + + 2007-11-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller + instruction encoding. + + svn path=/trunk/mono/; revision=90005 + +commit b15fabef0c7798e4850432910d97e0249cd691fc +Author: Zoltan Varga +Date: Sat Nov 10 15:22:00 2007 +0000 + + 2007-11-03 David S. Miller + + * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi + can be used if the constant value only has the top 22 bits set. + + svn path=/trunk/mono/; revision=89409 + +commit e22c1134d1553f6da21c1ef50ab4afb009d7c215 +Author: Geoff Norton +Date: Mon Nov 5 22:28:08 2007 +0000 + + 2007-11-01 Geoff Norton + + * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true + Fixes the build on Leopard. + + + svn path=/trunk/mono/; revision=88931 + +commit ad3b3601f5c113df825c3d2e09fb03b5aa4d1208 +Author: Geoff Norton +Date: Thu Nov 1 19:03:16 2007 +0000 + + 2007-11-01 Geoff Norton + + * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true + Fixes the build on Leopard. + + svn path=/trunk/mono/; revision=88673 + +commit 8991f4a9503167171a0ad5e745d71ec4bd8b846c +Author: Jonathan Chambers +Date: Fri Oct 26 14:41:54 2007 +0000 + + 2007-10-26 Jonathan Chambers + + * mini-amd64.c: Begin Win64 port. Use AMD64_ARG_REG# + defines to access param registers. Replace long usage with + gsize as sizeof(long) != sizeof(void*) on Win64. + + * mini-amd64.h: Add %rdi and %rsi to MonoLMF structure + on Win64. Fix intrinsic, use _AddressOfReturnAddress + instead of non-existant _GetAddressOfReturnAddress. + + * tramp-amd64.c: Use AMD64_ARG_REG# defines to access + param registers. Save/restore %rdi and %rsi in MonoLMF. 
+ + * exceptions-amd64.c: Use AMD64_ARG_REG# defines to access + param registers. Modify (throw_exception) signature to take + %rdi and %rsi on Win64. + + Code is contributed under MIT/X11 license. + + 2007-10-26 Jonathan Chambers + + * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG# + defines to access param registers. Replace long usage with + gsize as sizeof(long) != sizeof(void*) on Win64. + + Code is contributed under MIT/X11 license. + + + svn path=/trunk/mono/; revision=88258 + +commit 118f4540a2da9cdb72debfb786a9930e93f2a10b +Author: Zoltan Varga +Date: Tue Oct 9 00:12:58 2007 +0000 + + 2007-10-09 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unneccesary + rex prefix which trips up valgrind. + + svn path=/trunk/mono/; revision=87140 + +commit e43f3ebed2b5b54c47b5f8ce458788dce0ef97dc +Author: Zoltan Varga +Date: Sat Jul 14 14:04:54 2007 +0000 + + 2007-07-14 Zoltan Varga + + * amd64/amd64-codegen.h: Remove some unused rex prefixes. + + svn path=/trunk/mono/; revision=81979 + +commit 25f0e1d2bd61097c008fa88e4a114884bb6fe0c9 +Author: Paolo Molaro +Date: Wed Jul 4 13:17:45 2007 +0000 + + Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro + + * x86/x86-codegen.h: added minimal sse instructions currently + needed by the JIT. + + + svn path=/trunk/mono/; revision=81331 + +commit e971b6ec5cf03043dc227759fced05d5786964d4 +Author: Paolo Molaro +Date: Wed Jun 13 17:41:53 2007 +0000 + + 2007-06-13 Randolph Chung + + * hppa/hppa-codegen.h: Update with more instructions. + * hppa/tramp.c: Disable for linux since we don't support the + interpreter. + + + svn path=/trunk/mono/; revision=79463 + +commit 26169bb71cd30b373975373952fb11d7a26b0cca +Author: Zoltan Varga +Date: Sun May 20 19:41:51 2007 +0000 + + 2007-05-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed. + + svn path=/trunk/mono/; revision=77730 + +commit a024b2405701bbee2003e46a0f9b0e2c0486033c +Author: Zoltan Varga +Date: Mon Apr 23 11:31:33 2007 +0000 + + 2007-04-23 Zoltan Varga + + * alpha/alpha-codegen.h: More alpha port work from + Sergey Tikhonov . + + svn path=/trunk/mono/; revision=76103 + +commit 5ca5ea86f1ff85953c28e0ba3b657268cd2cdfba +Author: Neale Ferguson +Date: Sun Apr 15 09:11:00 2007 +0000 + + * tramp.c: Add MONO_TYPE_PTR case. + * mini-s390.c: Correct checking for enum type in return value processing. + + svn path=/trunk/mono/; revision=75718 + +commit 9159abc7ec906d64a15eee8e02b9e5b3f2cce87d +Author: Neale Ferguson +Date: Thu Apr 12 20:45:34 2007 +0000 + + * tramp.c: Add MONO_TYPE_PTR case. + + + svn path=/trunk/mono/; revision=75663 + +commit b7fd657ee94257eeec946fa9eb11b3f60e7e33e6 +Author: Paolo Molaro +Date: Mon Mar 12 16:07:56 2007 +0000 + + Mon Mar 12 17:07:32 CET 2007 Paolo Molaro + + * amd64/amd64-codegen.h: removed some useless size rex prefixes. + + + svn path=/trunk/mono/; revision=74128 + +commit 0ba3e4bdd057c7a0d25767f7647a00f07683b44c +Author: Paolo Molaro +Date: Wed Jan 24 20:01:27 2007 +0000 + + Wed Jan 24 21:00:40 CET 2007 Paolo Molaro + + * arm/arm-codegen.h: fixed encoding of short/byte load/store + instructions with negative immediate offsets. + + + svn path=/trunk/mono/; revision=71622 + +commit 0251f000fba5c8f99bec6c33beae0c2aabe66451 +Author: Neale Ferguson +Date: Tue Jan 23 17:11:29 2007 +0000 + + * s390x-codegen.h: Add packed attribute to several instruction structures. 
+ + svn path=/trunk/mono/; revision=71523 + +commit 8e25ae408b9d1836130807d3f465023347051332 +Author: Miguel de Icaza +Date: Fri Dec 22 22:51:15 2006 +0000 + + Patch from Sergey Tikhonov + + Mono on Alpha updates: + + - Code cleanup + - Some hacks to support debugger + - updates for "linears" optimization + + + svn path=/trunk/mono/; revision=69976 + +commit edd2746e20c982e094abfd547afad74d8e7d2302 +Author: Paolo Molaro +Date: Mon Nov 20 16:37:26 2006 +0000 + + Mon Nov 20 17:36:45 CET 2006 Paolo Molaro + + * arm/arm-codegen.h: added suppot for thumb interworking instructions. + + + svn path=/trunk/mono/; revision=68201 + +commit b63503e7c4b5ebb8baafb5b58ec69395146db022 +Author: Paolo Molaro +Date: Wed Nov 15 16:00:09 2006 +0000 + + Wed Nov 15 16:56:53 CET 2006 Paolo Molaro + + * mips/*: fixes by Mark E Mason . + + + svn path=/trunk/mono/; revision=67929 + +commit 6f8d67005785ba86e81ac930325767d0b270a070 +Author: Paolo Molaro +Date: Fri Nov 10 18:42:10 2006 +0000 + + Typo fixes. + + svn path=/trunk/mono/; revision=67683 + +commit f99322f3ea7b7be85ac63c87c664aafb7f5e17bf +Author: Miguel de Icaza +Date: Wed Oct 11 21:34:24 2006 +0000 + + 2006-10-11 Sergey Tikhonov + + * atomic.h: Fix atomic decrement. + + * mini/cpu-alpha.md: Use native long shift insts + + * mono/mono/mini/tramp-alpha.c: Implemented + mono_arch_patch_delegate_trampoline method + + * Started work on using global registers + + * Use byte/word memory load/store insts if cpu supports it + + * Code clean up + + + + + svn path=/trunk/mono/; revision=66573 + +commit 538fd0794b9ef24f7c765891ed682fc947cf8e85 +Author: Zoltan Varga +Date: Tue Sep 12 13:02:59 2006 +0000 + + 2006-09-12 Zoltan Varga + + * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov . + + svn path=/trunk/mono/; revision=65305 + +commit 0689ca5f72fa8cb03fb1b565a31c4e2b22774a64 +Author: Paolo Molaro +Date: Tue Sep 12 11:10:42 2006 +0000 + + Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro + + * arm/*: VFP floating point format code generation support. + + + svn path=/trunk/mono/; revision=65295 + +commit deacad246a936216f09a81b9881c6780de8dd406 +Author: Zoltan Varga +Date: Tue Sep 12 10:05:29 2006 +0000 + + 2006-09-12 Zoltan Varga + + * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops. + + svn path=/trunk/mono/; revision=65289 + +commit 207e90216277d1d1ee0e6cd37f183440c8c39a26 +Author: Zoltan Varga +Date: Wed Jul 19 12:10:43 2006 +0000 + + 2006-07-19 Zoltan Varga + + * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg. + + svn path=/trunk/mono/; revision=62746 + +commit 8f58fa13418008cb86a8ba450a894b23efc4574e +Author: Zoltan Varga +Date: Wed Jul 19 12:09:09 2006 +0000 + + 2006-07-19 Zoltan Varga + + * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from + Sergey Tikhonov . Updates to alpha support. + + svn path=/trunk/mono/; revision=62745 + +commit ef8021400f045f835fcf70baf5ba5880fe6eca93 +Author: Paolo Molaro +Date: Thu Jun 15 15:00:59 2006 +0000 + + Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro + + * ppc/ppc-codegen.h: reduce noisy build warnings by + casting to the more commonly used unsigned char type + (from johannes@sipsolutions.net (Johannes Berg)). + + + svn path=/trunk/mono/; revision=61757 + +commit de54a3e44b1214298b39386b49e1ca992176e2e4 +Author: Zoltan Varga +Date: Sun May 14 18:51:25 2006 +0000 + + 2006-05-14 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this + opcode. 
+ + svn path=/trunk/mono/; revision=60695 + +commit 3b274ddc5c946640a4c0d6a7b2dee13cd2f5096d +Author: Zoltan Varga +Date: Fri Apr 21 14:51:24 2006 +0000 + + 2006-04-21 Zoltan Varga + + * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old + behaviour. + + svn path=/trunk/mono/; revision=59758 + +commit e830aadb2febf62051b8fc162884a909087cfe4e +Author: Zoltan Varga +Date: Wed Apr 12 19:02:09 2006 +0000 + + 2006-04-12 Zoltan Varga + + * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro. + + svn path=/trunk/mono/; revision=59415 + +commit a65cd014e420a38b47e00f5c6f9ce590fc00987b +Author: Zoltan Varga +Date: Tue Apr 4 13:18:49 2006 +0000 + + 2006-04-04 Zoltan Varga + + * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the + interpreter. + + svn path=/trunk/mono/; revision=59009 + +commit 0d566f3cb37ddf731fba6cfce9741e2224a13d77 +Author: Neale Ferguson +Date: Mon Mar 13 22:03:39 2006 +0000 + + * s390x-codegen.h: Fix immediate checks. + + svn path=/trunk/mono/; revision=57914 + +commit 15bc8b574c91bfaa40cd1d83374d0179148b5894 +Author: Neale Ferguson +Date: Fri Jan 6 18:52:21 2006 +0000 + + * s390x-codegen.h: Add lpdbr instruction (OP_ABS). + + * mini-s390x.c, inssel-s390x.brg, cpu-s390x.md: Fix ATOMIC_I8 + operations. Provide initial support for OP_ABS. + + svn path=/trunk/mono/; revision=55158 + +commit 1092c74e7a468b7761df92c2dc0dd2f2b49f21e6 +Author: Neale Ferguson +Date: Tue Jan 3 19:40:34 2006 +0000 + + * mono/io-layer/ChangeLog, mono/io-layer/atomic.h, mono/mini/mini-s390x.c, + mono/mini/mini-s390x.h, mono/mini/exceptions-s390x.c, + mono/mini/ChangeLog, mono/mini/s390-abi.cs, mono/mini/tramp-s390x.c, + mono/mini/inssel-s390x.brg, mono/mini/cpu-s390x.md, mono/mini/mini-codegen.c + mono/mini/basic-long.cs, mono/mini/Makefile.am, mono/arch/s390x/ChangeLog + mono/arch/s390x/s390x-codegen.h: 64-bit s390 support + + svn path=/trunk/mono/; revision=55020 + +commit 417b7fbe8f810e8fd62b2cb805164a3b80a536d6 +Author: Zoltan Varga +Date: Thu Dec 22 20:18:18 2005 +0000 + + 2005-12-22 Zoltan Varga + + * sparc/sparc-codegen.h (sparc_membar): Add membar instruction. + + svn path=/trunk/mono/; revision=54750 + +commit 259b4749eaf68bfd6818ab38df91e37239c5dd45 +Author: Neale Ferguson +Date: Tue Dec 13 19:12:20 2005 +0000 + + Continuing to bring s390 up to current levels + + svn path=/trunk/mono/; revision=54312 + +commit f5fc186c01c764705e303b3783bf06e507e54640 +Author: Paolo Molaro +Date: Tue Dec 13 13:57:51 2005 +0000 + + Avoid lvalue pointer casts. + + svn path=/trunk/mono/; revision=54279 + +commit ab97bc8d9e311f447d9f4a78e5a28ef6ff9b82ad +Author: Zoltan Varga +Date: Sun Oct 30 18:06:59 2005 +0000 + + 2005-10-30 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_m17): Fix a warning. + + svn path=/trunk/mono/; revision=52399 + +commit bb6893fc1e1854a8c9f848dfbfbc2dd00bde8735 +Author: Zoltan Varga +Date: Sun Oct 16 15:21:39 2005 +0000 + + 2005-10-16 Zoltan Varga + + * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp. 
+ + svn path=/trunk/mono/; revision=51764 + +commit 0b2d13a625bfd03f8d24538ef48870daed540ee3 +Author: Miguel de Icaza +Date: Fri Oct 7 21:25:31 2005 +0000 + + Patch incorporated from SUSE, Neale reviewed it + + svn path=/trunk/mono/; revision=51443 + +commit 2bba48015b516fd326cd082eb85325aa5b7676bf +Author: Miguel de Icaza +Date: Fri Oct 7 20:36:01 2005 +0000 + + Patch incorporated from SUSE, Neale reviewed it + + svn path=/trunk/mono/; revision=51434 + +commit 749c9989f64683d8363481304647924ec1d910af +Author: Paolo Molaro +Date: Tue Sep 27 13:25:16 2005 +0000 + + Another compilation fix. + + svn path=/trunk/mono/; revision=50857 + +commit 64dbeb6e048aa9654800624a74e9c58065cf01ea +Author: Raja R Harinath +Date: Tue Sep 27 09:09:41 2005 +0000 + + * arm/dpiops.sh, arm/fpaops.h: Output to stdout. + * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix + for srcdir != builddir. + + svn path=/trunk/mono/; revision=50833 + +commit 7c363c19299d3f85ee7de0eec2a83108ea98eff2 +Author: Paolo Molaro +Date: Mon Sep 26 08:58:47 2005 +0000 + + Compilation fix. + + svn path=/trunk/mono/; revision=50748 + +commit 541c387c65579ca75abe8cdb9d0725c1e6d90df1 +Author: Zoltan Varga +Date: Sun Sep 11 16:55:41 2005 +0000 + + 2005-09-11 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro. + + svn path=/trunk/mono/; revision=49910 + +commit efbd8e41cf3337d59812a7cca48df3caee116b07 +Author: Zoltan Varga +Date: Sat Sep 10 20:50:37 2005 +0000 + + 2005-09-10 Zoltan Varga + + * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions. + Integrate emission of unwind directives into the assembly macros. + + svn path=/trunk/mono/; revision=49875 + +commit 8b07d9836f60fee4ff83a14ce110921be8ef8f2e +Author: Zoltan Varga +Date: Sat Sep 3 22:06:10 2005 +0000 + + 2005-09-04 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_no_stop): New macro. + + svn path=/trunk/mono/; revision=49399 + +commit 4e89407a4a8dc38125a804df930515a31603cdca +Author: Zoltan Varga +Date: Sat Aug 27 14:33:09 2005 +0000 + + 2005-08-27 Zoltan Varga + + * ia64/ia64-codegen.h: Fix some bugs. + + * ia64/codegen.c: Update to work with latest ia64-codegen.h + + svn path=/trunk/mono/; revision=48969 + +commit 9a52b3ea85b1899c6cc23263eec6879841b3fd08 +Author: Zoltan Varga +Date: Fri Aug 26 13:34:24 2005 +0000 + + 2005-08-26 Zoltan Varga + + * ia64/Makefile.am: Distribute ia64-codegen.h. + + svn path=/trunk/mono/; revision=48891 + +commit 16291812e22e9750bf101e297fc573ce35bab382 +Author: Wade Berrier +Date: Fri Aug 26 06:58:33 2005 +0000 + + Oops + + svn path=/trunk/mono/; revision=48874 + +commit d4b1ea47e0395555276e1a6c8ddfa3800692b6ea +Author: Wade Berrier +Date: Fri Aug 26 06:48:41 2005 +0000 + + Include files for 'make dist' + + svn path=/trunk/mono/; revision=48871 + +commit cac0da0afb2a782de1db55a000a2125531e757fd +Author: Zoltan Varga +Date: Sat Aug 20 22:16:11 2005 +0000 + + 2005-08-21 Zoltan Varga + + * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs. + + svn path=/trunk/mono/; revision=48614 + +commit d151f0e0b203a78ca99cab91d9df89ffe7728880 +Author: Zoltan Varga +Date: Wed Aug 17 20:28:30 2005 +0000 + + 2005-08-17 Zoltan Varga + + * ia64/ia64-codegen.h: Add dependency information for all instructions. + + svn path=/trunk/mono/; revision=48476 + +commit f1bce593b3504a82fc344d696eeedd91c39bcfee +Author: Paolo Molaro +Date: Thu Aug 4 18:51:34 2005 +0000 + + Uncommitted fixes. 
+ + svn path=/trunk/mono/; revision=48015 + +commit 8348805e278d70da207455a0fe5cd470b00f3d8d +Author: Zoltan Varga +Date: Sat Jul 30 15:43:43 2005 +0000 + + 2005-07-30 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=47855 + +commit 0fb75c64cb1361cc81a4e47ca556a597b440d65a +Author: Paolo Molaro +Date: Wed Jul 20 16:55:20 2005 +0000 + + Wed Jul 20 18:01:54 BST 2005 Paolo Molaro + + * arm/*: more codegen macros. + + + svn path=/trunk/mono/; revision=47473 + +commit 2205bab6932e69490e48b9e11957041e938020ee +Author: Zoltan Varga +Date: Mon Jul 18 20:33:37 2005 +0000 + + 2005-07-18 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work. + + svn path=/trunk/mono/; revision=47395 + +commit 5a9a7537801ad68c0f8552e7e107994b793e93ac +Author: Zoltan Varga +Date: Wed Jun 22 22:00:43 2005 +0000 + + 2005-06-23 Zoltan Varga + + * ia64/ia64-codegen.h: Add some new pseudo ops. + + svn path=/trunk/mono/; revision=46401 + +commit f51b94e34b1a887304ace96af27d51b4ec98ab4b +Author: Zoltan Varga +Date: Sun Jun 19 20:18:07 2005 +0000 + + 2005-06-19 Zoltan Varga + + * ia64/ia64-codegen.h: Fix encoding of ia64_fclass. + + svn path=/trunk/mono/; revision=46224 + +commit 398224a9101808c8ca470b24366a506eeefec135 +Author: Zoltan Varga +Date: Sun Jun 12 20:41:05 2005 +0000 + + 2005-06-12 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45834 + +commit 5a9f032072053d76af233b9906614ee491d6295c +Author: Zoltan Varga +Date: Thu Jun 9 20:22:08 2005 +0000 + + 2005-06-09 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45719 + +commit 5f3ca7841b8aedd35f0c23781f2ac96f31ed501e +Author: Zoltan Varga +Date: Mon May 30 14:09:48 2005 +0000 + + 2005-05-30 Zoltan Varga + + * ia64/codegen.c: Fix it after latest changes. + + svn path=/trunk/mono/; revision=45192 + +commit d6844049f8659741b3afe9fa66136738107d28ac +Author: Zoltan Varga +Date: Sun May 29 14:24:56 2005 +0000 + + 2005-05-29 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45159 + +commit 4be6ea9e269927e9fbf06b0b73f53fef311f569f +Author: Zoltan Varga +Date: Sun May 29 11:16:27 2005 +0000 + + 2005-05-29 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45157 + +commit 7b483f1f48c7abc9d0c17a1fb34b30ddaa7058bb +Author: Zoltan Varga +Date: Sat May 28 18:02:41 2005 +0000 + + 2005-05-28 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45147 + +commit e360150e81b841b0644b5adc604f22f4b71e3987 +Author: Zoltan Varga +Date: Sat May 28 17:08:04 2005 +0000 + + 2005-05-28 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45145 + +commit a781c3a65727b60386604adc6023f3f5a53b3e3e +Author: Zoltan Varga +Date: Fri May 27 21:41:59 2005 +0000 + + 2005-05-28 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45127 + +commit 20c2fc7ba73ffaf5506ab9bf487c3f519de5067f +Author: Zoltan Varga +Date: Thu May 26 17:16:50 2005 +0000 + + 2005-05-26 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=45064 + +commit f37723d307325b539fc515774d3988e0c7ff7a14 +Author: Zoltan Varga +Date: Sun May 22 18:25:06 2005 +0000 + + 2005-05-22 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. 
+ + svn path=/trunk/mono/; revision=44892 + +commit 1d1c3f56953c0cb26c2e695b468ea1da368aaef0 +Author: Zoltan Varga +Date: Sun May 22 13:31:28 2005 +0000 + + 2005-05-22 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=44888 + +commit e32454dae1a3679056fb4ac86ffc81defc3a5eb7 +Author: Zoltan Varga +Date: Sun May 22 01:29:00 2005 +0000 + + 2005-05-22 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=44883 + +commit fee3f0247077513ba3254ddb410687a11c667b8c +Author: Zoltan Varga +Date: Fri May 20 21:55:37 2005 +0000 + + 2005-05-21 Zoltan Varga + + * ia64/ia64-codegen.h: Ongoing IA64 work. + + svn path=/trunk/mono/; revision=44855 + +commit 1d94e7499dc18c3882f4aa16e977ceeaacddd466 +Author: Zoltan Varga +Date: Wed May 18 23:02:39 2005 +0000 + + 2005-05-19 Zoltan Varga + + * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work. + + svn path=/trunk/mono/; revision=44722 + +commit 3f053b86a49d8c41d47ca2ff771bda64ee5a5ddc +Author: Zoltan Varga +Date: Wed May 18 18:55:54 2005 +0000 + + 2005-05-18 Zoltan Varga + + * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter. + + svn path=/trunk/mono/; revision=44705 + +commit 061e9ab4d483c98d6747caad5160bd30fbbf09ab +Author: Zoltan Varga +Date: Sat May 14 19:52:56 2005 +0000 + + 2005-05-14 Zoltan Varga + + * Makefile.am: Only compile libmonoarch if the interpreter is compiled. + + svn path=/trunk/mono/; revision=44526 + +commit 82a68f6e85fbc7aaa7832584b2f51953871f1390 +Author: Zoltan Varga +Date: Sat May 14 17:35:42 2005 +0000 + + 2005-05-14 Zoltan Varga + + * ia64/ia64-codegen.h: Add IA64 code generation macros. + + * Makefile.am: Add ia64 subdir. + + svn path=/trunk/mono/; revision=44523 + +commit 800d43a2433ffc57d904687fdd2b746d5277cab5 +Author: Zoltan Varga +Date: Thu May 5 12:13:33 2005 +0000 + + 2005-05-05 Zoltan Varga + + * alpha/tramp.c: Applied patch from Jakub Bogusz . + + svn path=/trunk/mono/; revision=44078 + +commit 293459dd29bdd85542f499e0530c9504ced01604 +Author: Zoltan Varga +Date: Mon Mar 28 21:09:11 2005 +0000 + + 2005-03-28 Zoltan Varga + + * amd64/amd64-codegen.h: Avoid emitting a rex in some places. + + svn path=/trunk/mono/; revision=42316 + +commit 140d5636edd892a388da877b7035f1809590e7ff +Author: Zoltan Varga +Date: Tue Mar 15 19:47:29 2005 +0000 + + 2005-03-15 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the + byte registers. + + svn path=/trunk/mono/; revision=41848 + +commit 242ec30220c85e3f69a1dd1d50469771c4ba7047 +Author: Zoltan Varga +Date: Tue Mar 15 17:08:39 2005 +0000 + + 2005-03-15 Zoltan Varga + + * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro. + + svn path=/trunk/mono/; revision=41842 + +commit f7074904827b639bb500dcb92c481ec9f35a88a0 +Author: Zoltan Varga +Date: Mon Mar 14 15:17:54 2005 +0000 + + 2005-03-14 Zoltan Varga + + * amd64/amd64-codegen.h: Add missing AMD64_XMM7. + + svn path=/trunk/mono/; revision=41795 + +commit d23ce2f6ba82d598af825e20b95cf7938ff5bc39 +Author: Zoltan Varga +Date: Sun Mar 13 16:57:42 2005 +0000 + + 2005-03-13 Zoltan Varga + + * amd64/amd64-codegen.h: Remove some unneccesary REXes. + + svn path=/trunk/mono/; revision=41765 + +commit ad5014de38c4bde6ef12a04bbbcdf0303ac8acc1 +Author: Zoltan Varga +Date: Tue Mar 8 11:11:38 2005 +0000 + + 2005-03-08 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size + variants to some sse2 macros. 
+ + svn path=/trunk/mono/; revision=41557 + +commit ee4c2805588b6d8291ac4349a520ca9c99050b59 +Author: Zoltan Varga +Date: Tue Mar 8 09:28:19 2005 +0000 + + 2005-03-08 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert + to a 64 bit value. + + svn path=/trunk/mono/; revision=41554 + +commit 3c4a8677815d2ad4e0b47b809ca16b43f33e3f96 +Author: Zoltan Varga +Date: Sun Mar 6 21:25:22 2005 +0000 + + 2005-03-06 Zoltan Varga + + * amd64/amd64-codegen.h: Add some SSE2 instructions. + + svn path=/trunk/mono/; revision=41491 + +commit b175669d7abc2f7e83940305cf2cb1f7663569b0 +Author: Zoltan Varga +Date: Sun Feb 20 18:48:25 2005 +0000 + + 2005-02-20 Zoltan Varga + + * amd64/amd64-codegen.h: Add xadd instructions. + + svn path=/trunk/mono/; revision=40956 + +commit c7a5bc7b7055832a36dc63ba67ad7add33a95d06 +Author: Zoltan Varga +Date: Sun Feb 20 14:16:51 2005 +0000 + + 2005-02-20 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex. + + svn path=/trunk/mono/; revision=40934 + +commit 2cf88a5c39f13e54cc5e5f95ab6021924077c1d8 +Author: Ben Maurer +Date: Wed Feb 16 04:43:00 2005 +0000 + + remove .cvsignore, as this is not used anymore + + svn path=/trunk/mono/; revision=40731 + +commit 0c1ce771e696eabde58e35deb64c0b578be7a92d +Author: Neale Ferguson +Date: Mon Jan 10 21:13:14 2005 +0000 + + - Fix atomic ops on s390 + - Implement OP_ATOMIC_xxx operations on s390 + - Standardize exception handling on s390 with other platforms + - Enable out of line bblock support + - Check vtable slot belongs to domain when building trampoline + + svn path=/trunk/mono/; revision=38647 + +commit 9f3d964963eac63f42db702fe80cbfa89e3a73b4 +Author: Raja R Harinath +Date: Mon Dec 13 06:05:53 2004 +0000 + + remove svn:executable from *.cs *.c *.h + + svn path=/trunk/mono/; revision=37682 + +commit c7b8d172d479d75da8d183f9491e4651bbc5b4f7 +Author: Neale Ferguson +Date: Tue Dec 7 04:18:03 2004 +0000 + + Fix atomic operations and add initial support for tls support. + + svn path=/trunk/mono/; revision=37284 + +commit c523c66bf11c9c05df3d77d42f8be9821ad558e5 +Author: Zoltan Varga +Date: Thu Nov 25 13:32:53 2004 +0000 + + 2004-11-25 Zoltan Varga + + * amd64/amd64-codegen.h: Updates to support the PIC changes. + + svn path=/trunk/mono/; revision=36549 + +commit da4b0970bffc8f281679bddf7371679910d0a23c +Author: Paolo Molaro +Date: Fri Nov 19 15:04:41 2004 +0000 + + Fri Nov 19 17:29:22 CET 2004 Paolo Molaro + + * ppc/ppc-codegen.h: counter reg decrement branch values + (patch by Geoff Norton ). + + + svn path=/trunk/mono/; revision=36320 + +commit 3e56873e56ee01f0195683a20bd44e0fd03db4ee +Author: Patrik Torstensson +Date: Thu Nov 18 18:44:57 2004 +0000 + + 2004-11-16 Patrik Torstensson + + * x86/x86-codegen.h: added opcodes for xadd instructions + + + svn path=/trunk/mono/; revision=36283 + +commit 59c3726af38156a306a67c2dd6e755e8bdd0d89a +Author: Neale Ferguson +Date: Wed Nov 17 03:05:28 2004 +0000 + + Add support for siginfo_t as a parameter to mono_arch_is_int_overflow. Support this + routine in s390. + + svn path=/trunk/mono/; revision=36188 + +commit 149905478e1af4189a0cd9cf3f0e294dbb2bccbc +Author: Zoltan Varga +Date: Mon Nov 15 19:00:05 2004 +0000 + + 2004-11-15 Zoltan Varga + + * amd64/x86-64-codegen.h: Get rid of this. 
+ + svn path=/trunk/mono/; revision=36145 + +commit b982bf7e3e3e98afa37544b4a197d406f00b5e5a +Author: Ben Maurer +Date: Mon Nov 8 03:19:16 2004 +0000 + + fix + + svn path=/trunk/mono/; revision=35803 + +commit 4c5436f259d4a109ab352f2ec7b7891cdce76cc9 +Author: Ben Maurer +Date: Mon Sep 6 15:07:37 2004 +0000 + + fix warning + + svn path=/trunk/mono/; revision=33415 + +commit 3a8f0a20bd939db788d3fd871b4c0ca37a4d0f96 +Author: Ben Maurer +Date: Wed Sep 1 01:04:04 2004 +0000 + + Support short forms of push imm + + svn path=/trunk/mono/; revision=33128 + +commit e11c33f0ae258eb62dd5fc2e4c6ce12952d25233 +Author: Zoltan Varga +Date: Sun Aug 29 21:04:04 2004 +0000 + + 2004-08-30 Zoltan Varga + + * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX + generation. + + svn path=/trunk/mono/; revision=33003 + +commit b0791969d5ddbcb465d86bcd42c86150f653a9a1 +Author: Zoltan Varga +Date: Sun Aug 29 11:11:38 2004 +0000 + + 2004-08-29 Zoltan Varga + + * amd64/amd64-codegen.h: More SSE work. + + svn path=/trunk/mono/; revision=32992 + +commit 8ca359bb4894521802e1f2044ec55a9aada4c08e +Author: Zoltan Varga +Date: Sun Aug 29 09:41:22 2004 +0000 + + 2004-08-29 Zoltan Varga + + * amd64/amd64-codegen.h: Add SSE2 instructions. + + svn path=/trunk/mono/; revision=32991 + +commit 39a59671ff853ab672d9db1c982093ee1c7cc1f8 +Author: Zoltan Varga +Date: Sat Aug 21 20:07:37 2004 +0000 + + 2004-08-21 Zoltan Varga + + * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG + since under amd64, all 16 registers have a low part. + + svn path=/trunk/mono/; revision=32632 + +commit c6a18db1cda9d62eaba7e1095f34eb84e7c39a8b +Author: Zoltan Varga +Date: Mon Aug 16 12:58:06 2004 +0000 + + 2004-08-16 Zoltan Varga + + * x86/x86-codegen.h: Add macros for accessing the mod/rm byte. + + svn path=/trunk/mono/; revision=32365 + +commit 7f2d7df98341055eaf370855c499508599770dec +Author: Ben Maurer +Date: Sat Aug 14 18:28:26 2004 +0000 + + hush cvs + + svn path=/trunk/mono/; revision=32344 + +commit ee4209b85e88e6adfc07a057b41747607235805c +Author: Neale Ferguson +Date: Fri Aug 6 16:28:23 2004 +0000 + + Support the MEMCPY(base, base) rule and add initial ARGLIST support + + svn path=/trunk/mono/; revision=31985 + +commit ee8712fd77bdd445d98c511a07f29b5136368201 +Author: Miguel de Icaza +Date: Thu Aug 5 23:28:29 2004 +0000 + + Add s390x + + svn path=/trunk/mono/; revision=31966 + +commit 17467e9a25e9a1cf71c170fd85e042a5a11a0f05 +Author: Neale Ferguson +Date: Wed Aug 4 20:43:11 2004 +0000 + + Further 64-bit S/390 updates + + svn path=/trunk/mono/; revision=31898 + +commit 4ad821169050e70979e71bbd5229557570059139 +Author: Neale Ferguson +Date: Wed Aug 4 02:54:52 2004 +0000 + + S/390 64-bit support + tailc processing fix for S/390 32-bit + + svn path=/trunk/mono/; revision=31840 + +commit 5ebecc33aca9878d2071c8766e5741cd6434d676 +Author: Neale Ferguson +Date: Fri Jul 30 23:11:29 2004 +0000 + + Add some s390 specific tests + + svn path=/trunk/mono/; revision=31690 + +commit 4e44c97a16962680e5009c97c0022e10ddbbad30 +Author: Neale Ferguson +Date: Fri Jul 30 18:23:23 2004 +0000 + + Optimize code generation macros and standardize + + svn path=/trunk/mono/; revision=31683 + +commit 57ac232b2805d02a4e2b6322ed9532313337e56c +Author: Zoltan Varga +Date: Fri Jul 30 16:01:49 2004 +0000 + + 2004-07-30 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. 
+ + svn path=/trunk/mono/; revision=31664 + +commit 128d13d3973f07f5afba3ac7022bd9a4e7550626 +Author: Ben Maurer +Date: Thu Jul 29 17:10:53 2004 +0000 + + 2004-07-29 Ben Maurer + + * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm + + svn path=/trunk/mono/; revision=31622 + +commit 77b5d5d9a5c508cef6a93be733818c446b9fe12c +Author: Zoltan Varga +Date: Wed Jul 28 20:14:03 2004 +0000 + + 2004-07-28 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + + svn path=/trunk/mono/; revision=31586 + +commit a451b99d1a51fe3ffa7334ffbe6865f388e549c0 +Author: Zoltan Varga +Date: Sat Jul 24 18:29:32 2004 +0000 + + 2004-07-24 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + + svn path=/trunk/mono/; revision=31431 + +commit b58d4fba4fad9c9cd52604adf39ffe578e407b14 +Author: Zoltan Varga +Date: Fri Jul 23 20:05:59 2004 +0000 + + 2004-07-23 Zoltan Varga + + * amd64/amd64-codegen.h: Ongoing JIT work. + + svn path=/trunk/mono/; revision=31426 + +commit c7d11ced2179a38a406489b57f4a2f317fbe5da3 +Author: Zoltan Varga +Date: Fri Jul 23 16:07:08 2004 +0000 + + 2004-07-23 zovarga + + * amd64/amd64-codegen.h: Ongoing JIT work. + + svn path=/trunk/mono/; revision=31416 + +commit f69c71790b01b62dd17d4479db005c3ef68e5e38 +Author: Neale Ferguson +Date: Mon Jul 12 23:03:57 2004 +0000 + + Add mvcl instruction + + svn path=/trunk/mono/; revision=31055 + +commit c9c82671d87761dc9a06b78082402924cf8f540d +Author: Neale Ferguson +Date: Mon Jul 12 12:05:08 2004 +0000 + + Add instructions to support enhanced memory-to-memory operations. + + svn path=/trunk/mono/; revision=31039 + +commit 08a92e1c00c0a0cf3c446257b446939062605260 +Author: Zoltan Varga +Date: Wed Jun 30 15:04:48 2004 +0000 + + 2004-06-30 Zoltan Varga + + * sparc/sparc-codegen.h: Add SPARC64 support. + + svn path=/trunk/mono/; revision=30577 + +commit d1881ea0cd90053526fa30405f4aeac90e06b485 +Author: Neale Ferguson +Date: Fri Jun 18 20:03:01 2004 +0000 + + Fix broken ABI for stack parameters + + svn path=/trunk/mono/; revision=29915 + +commit 4e0bce5ca726ed3d2a33d6cfdc3b41b04fcb91f8 +Author: Paolo Molaro +Date: Thu Jun 17 16:25:19 2004 +0000 + + API cleanup fixes. + + svn path=/trunk/mono/; revision=29787 + +commit 1ac8bbc10c8f2cff9fe8aef20bee51612aa77f88 +Author: Paolo Molaro +Date: Wed Jun 16 15:24:15 2004 +0000 + + Wed Jun 16 18:11:41 CEST 2004 Paolo Molaro + + * Makefile.am, *.c, *.h: more API cleanups. + + svn path=/trunk/mono/; revision=29691 + +commit cf789b0df2ab67298e712242ca201bd01d38c254 +Author: Paolo Molaro +Date: Fri May 21 13:04:55 2004 +0000 + + More encoding fixes. + + svn path=/trunk/mono/; revision=27820 + +commit 47892f7ea09d90ff4385b3f9c3796d5ce80ee76d +Author: Paolo Molaro +Date: Mon May 10 14:37:42 2004 +0000 + + Fix macros. + + svn path=/trunk/mono/; revision=27028 + +commit e85ff74df8db9dbeaa2f923b2d4b451fd84dcdc0 +Author: Bernie Solomon +Date: Sat May 8 01:03:26 2004 +0000 + + 2004-05-07 Bernie Solomon + + * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32 + + svn path=/trunk/mono/; revision=26957 + +commit f4dcc4e46be455a7a289a969529ba4a1cd0bc3f3 +Author: Neale Ferguson +Date: Fri May 7 19:53:40 2004 +0000 + + Bring s390 JIT up to date. 
+ + svn path=/trunk/mono/; revision=26943 + +commit e79a83571f6126771c5e997560dd7e15c540df3f +Author: Bernie Solomon +Date: Fri Apr 30 03:47:45 2004 +0000 + + 2004-04-29 Bernie Solomon + + * ppc/tramp.c: use sizeof (stackval), fix + delegate tramp frame layout for Apple + + svn path=/trunk/mono/; revision=26383 + +commit f05e6864576c8c9e827cf6affbaff770732628d4 +Author: Paolo Molaro +Date: Thu Apr 29 18:59:24 2004 +0000 + + Fix stmw opcode with signed offsets. + + svn path=/trunk/mono/; revision=26328 + +commit 92e3edf52f04c550767f3ae59c0f7fcefb46cbf8 +Author: Urs C. Muff +Date: Wed Apr 28 03:59:07 2004 +0000 + + cleanup + + svn path=/trunk/mono/; revision=26114 + +commit ab07311f8d1aeb258795fc72c5ed216f603db092 +Author: David Waite +Date: Tue Apr 27 04:13:19 2004 +0000 + + 2004-04-26 David Waite + + * unknown.c: modify to have content for defined platforms (to + avoid ISO C warning) + + svn path=/trunk/mono/; revision=26036 + +commit 9b84c8398a2558c61613ec50d3c3546627ac1e2d +Author: Raja R Harinath +Date: Tue Apr 13 04:31:05 2004 +0000 + + ignores + + svn path=/trunk/mono/; revision=25379 + +commit 8adf42aeb550308e5a30e4308ad639fafa27e7e3 +Author: Bernie Solomon +Date: Tue Mar 30 01:44:17 2004 +0000 + + 2004-03-29 Bernie Solomon + + * hppa/hppa-codegen.h: + fix displacements in FP instrs + + svn path=/trunk/mono/; revision=24755 + +commit e82c4f6b16e7d3a7bdabe2df046b7ce17d91e716 +Author: Bernie Solomon +Date: Tue Mar 30 01:18:11 2004 +0000 + + 2004-03-29 Bernie Solomon + + * amd64/tramp.c: + * arm/tramp.c: + * hppa/tramp.c: + * ppc/tramp.c: + * s390/tramp.c: + * sparc/tramp.c: + * x86/tramp.c: + remove child from MonoInvocation as it isn't used. + + svn path=/trunk/mono/; revision=24751 + +commit 73296dcd03106668c5db4511948983bdadeaee2f +Author: Bernie Solomon +Date: Tue Mar 23 22:01:55 2004 +0000 + + 2004-03-23 Bernie Solomon + + * hppa/hppa-codegen.h: created + + * hppa/tramp.c: changed style to be more like + other platforms. + + * hppa/Makefile.am: add hppa-codegen.h + + svn path=/trunk/mono/; revision=24504 + +commit 6e46d909fa182adf4051e1a3c07bae63b93a2bc3 +Author: Zoltan Varga +Date: Tue Mar 16 19:22:52 2004 +0000 + + 2004-03-16 Zoltan Varga + + * sparc/sparc-codegen.h: Add v9 branches with prediction. + + svn path=/trunk/mono/; revision=24153 + +commit 49a337364d8413d2528fe97e68f16ef610bb3c6a +Author: Miguel de Icaza +Date: Tue Mar 16 16:20:03 2004 +0000 + + Add + + svn path=/trunk/mono/; revision=24136 + +commit ce4b3b024bba2c8bd4d874a75ef7aa23e118abf7 +Author: Miguel de Icaza +Date: Tue Mar 16 16:16:35 2004 +0000 + + Rename, since stupid cvs gets confused with the dash in x86-64 + + svn path=/trunk/mono/; revision=24134 + +commit 01dc8bdaddab8f9b1c939716c36d13a35cf2494d +Author: Miguel de Icaza +Date: Tue Mar 16 16:16:07 2004 +0000 + + Added back + + svn path=/trunk/mono/; revision=24133 + +commit a97ef493bb1e42b3afa548e47e3e14afe028b3ef +Author: Miguel de Icaza +Date: Tue Mar 16 16:03:49 2004 +0000 + + Add x86-64 + + svn path=/trunk/mono/; revision=24131 + +commit 25f79c5f1b26de4e7a413128d37731e1fcf09f14 +Author: Bernie Solomon +Date: Tue Mar 16 00:02:55 2004 +0000 + + 2004-03-15 Bernie Solomon + + * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg + so Sun's dis command recognizes it. + + svn path=/trunk/mono/; revision=24084 + +commit 38dd3d4c585c7e9cc116b7dfb5e89356c4d02da2 +Author: Zoltan Varga +Date: Mon Mar 15 17:28:56 2004 +0000 + + 2004-03-15 Zoltan Varga + + * sparc/sparc-codegen.h: Add some v9 instructions. 
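As an aside on the v9 additions noted above: the branch-with-prediction forms pack an extra prediction bit and a condition-code selector into the instruction word. A hedged sketch of encoding a BPcc per the SPARC v9 format-2 layout (the function below is hypothetical, not taken from sparc-codegen.h):

    #include <stdint.h>

    /* Illustrative SPARC v9 BPcc encoder: op=00 (bits 31-30), a=annul
     * (bit 29), cond (28-25), op2=001 (24-22), cc1:cc0 (21-20),
     * p=predict-taken (19), disp19 word displacement (18-0). */
    static uint32_t
    sparc_v9_bpcc (int annul, int cond, int cc, int predict, int32_t disp19)
    {
            return ((uint32_t)(annul & 1) << 29)
                 | ((uint32_t)(cond & 0xf) << 25)
                 | (1u << 22)                     /* op2 = 001 */
                 | ((uint32_t)(cc & 3) << 20)
                 | ((uint32_t)(predict & 1) << 19)
                 | ((uint32_t)disp19 & 0x7ffff);
    }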
+ + svn path=/trunk/mono/; revision=24050 + +commit 36d64a0bbf92ca51335ddcb87627a8194f601820 +Author: Zoltan Varga +Date: Thu Mar 11 18:23:26 2004 +0000 + + 2004-03-11 Zoltan Varga + + * sparc/sparc-codegen.h: Ongoing sparc work. + + svn path=/trunk/mono/; revision=23926 + +commit 7e46377b331225994068d848d9ff8ceaeb96d38a +Author: Duncan Mak +Date: Mon Mar 8 01:47:03 2004 +0000 + + 2004-03-07 Duncan Mak + + * Makefile.am: Removed the reference to 'x86-64'. This was the cause + of the missing Mono daily tarballs, 'make dist' wasn't working. + + We do have an 'amd64' directory, but it doesn't make it in 'make + dist'. + + svn path=/trunk/mono/; revision=23784 + +commit 94156ea640c77f37c64332acd21adf4170ecb67b +Author: Miguel de Icaza +Date: Sat Feb 28 15:53:18 2004 +0000 + + Add + + svn path=/trunk/mono/; revision=23562 + +commit c2492eb99fe2c3e148a8dc629cc283fafad7af7c +Author: Miguel de Icaza +Date: Fri Feb 27 17:03:30 2004 +0000 + + Remove amd64 + + svn path=/trunk/mono/; revision=23540 + +commit c58af24e593b96f1ccc7819ab100063aa4db3c54 +Author: Miguel de Icaza +Date: Fri Feb 27 17:03:17 2004 +0000 + + Add x86-64 directory + + svn path=/trunk/mono/; revision=23539 + +commit 7fd6186b66f081ef6c0fca7708ddf8a641a09eae +Author: Miguel de Icaza +Date: Tue Feb 24 18:01:50 2004 +0000 + + Add amd64 support patch from Zalman Stern + + svn path=/trunk/mono/; revision=23411 + +commit 5d0cafa77c2cd95cb92a2990184bac64ec287016 +Author: Zoltan Varga +Date: Thu Feb 19 14:14:37 2004 +0000 + + 2004-02-19 Zoltan Varga + + * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones. + + svn path=/trunk/mono/; revision=23248 + +commit f9f3c20b070f92bcf6f85f5bd68a24c3434fe6c4 +Author: Zoltan Varga +Date: Thu Feb 19 14:13:23 2004 +0000 + + 2004-02-19 Zoltan Varga + + * sparc/tramp.c: Fix alignment of structures containing doubles. + + svn path=/trunk/mono/; revision=23247 + +commit bb16201aaa018434f551c2657d9e38f28dfe8904 +Author: Zoltan Varga +Date: Mon Feb 2 15:56:15 2004 +0000 + + 2004-02-02 Zoltan Varga + + * sparc/tramp.c: Implement all floating point argument passing conventions in + Sparc V8. Also fix structure passing in V8. + + svn path=/trunk/mono/; revision=22704 + +commit 66607f84556593e2c3aa39bba418801193b6fddf +Author: Miguel de Icaza +Date: Sun Jan 18 18:00:40 2004 +0000 + + Apply patches from Neale Ferguson for s390 support + + svn path=/trunk/mono/; revision=22226 + +commit 963e1b962894e9b434a2e80e63394bd0d34e68b8 +Author: Paolo Molaro +Date: Sat Jan 3 21:42:37 2004 +0000 + + Codegen macros for mips. + + svn path=/trunk/mono/; revision=21658 + +commit 7e4789fdfc87f75e63612fe0aca1f66d76134ba9 +Author: Paolo Molaro +Date: Wed Dec 3 16:48:07 2003 +0000 + + Typo fix. + + svn path=/trunk/mono/; revision=20745 + +commit 96651158bf48aa1c31b5f2e3ca4cbf904211b1dc +Author: Paolo Molaro +Date: Thu Nov 13 15:23:48 2003 +0000 + + Thu Nov 13 16:24:29 CET 2003 Paolo Molaro + + * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct. + + svn path=/trunk/mono/; revision=19938 + +commit ebebe8e4565897dfaad69911c88f4dda134d4b84 +Author: Zoltan Varga +Date: Fri Oct 31 13:03:36 2003 +0000 + + 2003-10-31 Zoltan Varga + + * */tramp.c (mono_create_method_pointer): Rename to + mono_arch_create_method_pointer, move common code to a new function in + interp.c. + + * */tramp.c (mono_create_trampoline): Rename to + mono_arch_create_trampoline for consistency. 
+ + svn path=/trunk/mono/; revision=19500 + +commit c41c989929efaf77826634392c8ce9c54525809d +Author: Bernie Solomon +Date: Tue Oct 14 05:17:17 2003 +0000 + + 2003-10-13 Bernie Solomon + + * x86/tramp.c: restore EDX after memcpy call + + svn path=/trunk/mono/; revision=19024 + +commit e4f9a75ed58f5ca214a685041f2a538e2f40fe1f +Author: Bernie Solomon +Date: Mon Oct 13 22:56:37 2003 +0000 + + 2003-10-13 Bernie Solomon + + * Makefile.am: add hppa subdir + + svn path=/trunk/mono/; revision=18999 + +commit fa30eb232e53c9e39eec1bd44189e8ac29ba1644 +Author: Bernie Solomon +Date: Mon Oct 13 22:48:11 2003 +0000 + + 2003-10-13 Bernie Solomon + + * hppa/tramp.c: add initial implementation - this is 64 bit only + hppa/Makefile.am hppa/.cvsignore: added + + svn path=/trunk/mono/; revision=18996 + +commit 0b0945abf1e873f6a8dfb527236d8cce2ce15574 +Author: Bernie Solomon +Date: Mon Oct 13 22:38:25 2003 +0000 + + 2003-10-13 Bernie Solomon + + * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation + for V9 (64 bit), cover more 32 bit cases as well. + + svn path=/trunk/mono/; revision=18995 + +commit 6519bafeae686f3b32870a17dc1c84ae90ec95f9 +Author: Zoltan Varga +Date: Wed Sep 3 08:10:57 2003 +0000 + + 2003-09-03 Zoltan Varga + + * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). + + svn path=/trunk/mono/; revision=17839 + +commit 935c93eeaff3ad8ccee032ade3584a7f6ab8f4a1 +Author: Ben Maurer +Date: Mon Aug 25 13:38:19 2003 +0000 + + .cvsignore update + + svn path=/trunk/mono/; revision=17581 + +commit 0fed0582997210e2a0ac71a527dbd319a85aebcb +Author: ct +Date: Sun Aug 24 22:49:45 2003 +0000 + + completed the set of floating point ops + + svn path=/trunk/mono/; revision=17564 + +commit 3d0f6d935e3a9c180d0bbb14fc371d40e53b7872 +Author: Zoltan Varga +Date: Thu Aug 21 15:23:31 2003 +0000 + + 2003-08-21 Zoltan Varga + + * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com). + + svn path=/trunk/mono/; revision=17470 + +commit ed628ad0776db600fab8d5e4bcd6b563f5e808fd +Author: ct +Date: Tue Aug 19 03:04:34 2003 +0000 + + added more asm macros for floating point subtraction of single/double/quad + + svn path=/trunk/mono/; revision=17394 + +commit 6260d65a087be486df039c80eba92e44eb7a220d +Author: ct +Date: Tue Aug 19 02:53:23 2003 +0000 + + added floating point instructions for adding double, single, and quad numbers + + svn path=/trunk/mono/; revision=17393 + +commit c750ad8fea95e1fc81150e516ee26fbe79ab570d +Author: Paolo Molaro +Date: Thu Aug 7 14:13:05 2003 +0000 + + Fixed imm16 range check. + + svn path=/trunk/mono/; revision=17157 + +commit ebc38557433accd79fce2e38dff0505dfded5691 +Author: Paolo Molaro +Date: Thu Jul 31 14:32:42 2003 +0000 + + Thu Jul 31 16:19:07 CEST 2003 Paolo Molaro + + * configure.in, etc.: portability fixes and support for + buidling outside the srcdir from Laurent Morichetti . + + svn path=/trunk/mono/; revision=16937 + +commit 6e851a87092161092c6e8f06f4de13fb45bc04a6 +Author: Paolo Molaro +Date: Tue Jul 1 11:12:47 2003 +0000 + + Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro + + * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us). + + svn path=/trunk/mono/; revision=15809 + +commit c439e3df5cfa7c67d976258228cb9188a218c21d +Author: Paolo Molaro +Date: Wed Jun 25 13:18:00 2003 +0000 + + FP control word enum. + + svn path=/trunk/mono/; revision=15623 + +commit 2ad34b0dc225bf0b2efeea63c2f9287a1dbad162 +Author: Paolo Molaro +Date: Mon Jun 9 18:28:54 2003 +0000 + + Small updates. 
+ + svn path=/trunk/mono/; revision=15250 + +commit df86960d595f0284a453fe3fc67687b707148dbf +Author: Paolo Molaro +Date: Wed May 21 17:57:05 2003 +0000 + + Some fixes and more complete support. + + svn path=/trunk/mono/; revision=14769 + +commit 3af153bd53728da9da9215141b1341d60b447bd3 +Author: Dietmar Maurer +Date: Wed May 21 12:45:22 2003 +0000 + + 2003-05-21 Dietmar Maurer + + * mini-x86.c (mono_arch_get_allocatable_int_vars): dont allocate + I1 to registers because there is no simply way to sign extend 8bit + quantities in caller saved registers on x86. + + * inssel-float.brg: set costs of some rules to 2 so + that monobure always select the arch. specific ones if supplied, + regardless of the order we pass the files to monoburg. + + svn path=/trunk/mono/; revision=14757 + +commit c4eeb3dfdd19546fb0712e5306d8d96a9a07580e +Author: Dietmar Maurer +Date: Tue May 20 10:44:31 2003 +0000 + + 2003-05-20 Dietmar Maurer + + * mini-x86.c (mono_arch_get_allocatable_int_vars): allocate 8/16 + bit values to registers + + svn path=/trunk/mono/; revision=14720 + +commit 3a48ea89b161b268bb74f013cc36f6aec59e550b +Author: Malte Hildingson +Date: Thu May 1 23:42:01 2003 +0000 + + * tramp.c (mono_create_trampoline): tiny register allocation fix for reference types + + svn path=/trunk/mono/; revision=14195 + +commit 7595b109642f29ffe0cf8bb3e4411243b92a606f +Author: Malte Hildingson +Date: Sun Apr 27 16:04:54 2003 +0000 + + * tramp.c (alloc_code_buff): posix memory protection. + (mono_create_trampoline): new string marshaling + minor fixes. + (mono_create_method_pointer): delegates fix. + + svn path=/trunk/mono/; revision=14046 + +commit dfe276d1e1d116b113a639eecbc14c3661af5462 +Author: Sergey Chaban +Date: Sun Apr 27 14:50:16 2003 +0000 + + arm-WMMX.h: initial WirelessMMX support for ARM codegen; + + svn path=/trunk/mono/; revision=14044 + +commit 27eb0661916c7c65b43def99be92895c61f4d315 +Author: Sergey Chaban +Date: Sun Apr 27 14:47:57 2003 +0000 + + * ARM codegen update; + + svn path=/trunk/mono/; revision=14043 + +commit e1b54daadf68eef0608ac03bd6fe4dc374d78675 +Author: Paolo Molaro +Date: Sun Apr 27 11:40:11 2003 +0000 + + Make the debugging output off by default. + + svn path=/trunk/mono/; revision=14039 + +commit e679a120b848ea9e35e7c8a38ca3e03a386371c7 +Author: Patrik Torstensson +Date: Fri Feb 14 10:01:29 2003 +0000 + + 2003-02-14 Patrik Torstensson + + * x86-codegen.h: Added fstsw op code for getting fp flags + + svn path=/trunk/mono/; revision=11577 + +commit f468e62377dfe3079f5b2bade1f43d239842e381 +Author: Paolo Molaro +Date: Sat Feb 1 10:02:52 2003 +0000 + + Sat Feb 1 10:59:31 CET 2003 Paolo Molaro + + * alpha/*: update from Laramie. + + svn path=/trunk/mono/; revision=11090 + +commit cc3953655f65398b40e11fdcc97b1ae47bebfdc1 +Author: Paolo Molaro +Date: Mon Jan 27 11:54:14 2003 +0000 + + Mon Jan 27 12:49:10 CET 2003 Paolo Molaro + + * alpha/*: start of the port to the alpha architecture by + Laramie Leavitt (). + + svn path=/trunk/mono/; revision=10942 + +commit 898dd64bddf69974ae9a22d6aa0ce9625fc9a5a0 +Author: Paolo Molaro +Date: Tue Jan 21 16:33:33 2003 +0000 + + Tue Jan 21 17:29:53 CET 2003 Paolo Molaro + + * ppc/ppc-codegen.h: completed ppc native code generation by + Taylor Christopher P . + + svn path=/trunk/mono/; revision=10778 + +commit d2321af1b58b2fbb84c3b2cf3f6c7c7db0a787a4 +Author: Paolo Molaro +Date: Fri Jan 17 20:17:58 2003 +0000 + + Fri Jan 17 21:14:18 CET 2003 Paolo Molaro + + * ppc/tramp.c: adapted to work for MacOSX (from a patch by + John Duncan). 
+ + svn path=/trunk/mono/; revision=10630 + +commit 6d1b716753c1cc8a2f5c26338020941aa58ce9d7 +Author: Paolo Molaro +Date: Wed Jan 15 15:21:26 2003 +0000 + + Update to the API change of a while ago. + + svn path=/trunk/mono/; revision=10545 + +commit d4f44103ed442b9a6e221b58b68550c1de4dfa2b +Author: Mark Crichton +Date: Mon Nov 11 19:13:08 2002 +0000 + + Some debugging stubs. + + svn path=/trunk/mono/; revision=8922 + +commit b669ce7ac5106466cc6d57e9163ca5d6d80611aa +Author: Paolo Molaro +Date: Thu Oct 24 19:27:13 2002 +0000 + + s390 support from Neale Ferguson . + + svn path=/trunk/mono/; revision=8521 + +commit 457b666522f839e5e94e5fdda2284255b26d79a2 +Author: Mark Crichton +Date: Mon Oct 7 03:36:50 2002 +0000 + + Fix some minor trampoline nags. Now down to 15 failed tests. Delegate code + still broken, if anyone wants to help fix it. + + svn path=/trunk/mono/; revision=8041 + +commit b6d66c3ac8ae39c47b99dd8b8a7813e6f60c47e7 +Author: Mark Crichton +Date: Thu Oct 3 15:30:05 2002 +0000 + + Changes to tramp.c. Pass more tests. + + svn path=/trunk/mono/; revision=7966 + +commit e5d299dd18e820d33cf1d74e0e2de53e163cc07b +Author: Mark Crichton +Date: Wed Sep 25 04:50:10 2002 +0000 + + Stupid off-by-one error fixed. + + The problem was that I incremented gr as if we were on a PPC box. Sparc + doesn't need such "alignment" of the registers. + + svn path=/trunk/mono/; revision=7800 + +commit a9d8f44092c7c313efae893ff64306dc92985110 +Author: Mark Crichton +Date: Wed Sep 25 01:52:30 2002 +0000 + + arch/sparc/tramp.c: Fixed once again. Now works, mostly. + io-layer/atomic.h: It's sparc on gcc/solaris, and __sparc__ on gcc/linux. + had to add an #ifdef. + + svn path=/trunk/mono/; revision=7798 + +commit 0110bf4a5a435c5d60583887e0e0f28b7993a4cf +Author: Mark Crichton +Date: Mon Sep 23 02:25:43 2002 +0000 + + Starting rewrite of trampolining for SPARC. It needed some cleanup. + + It doesn't work at all now. GO PROGRESS! + + svn path=/trunk/mono/; revision=7728 + +commit fe7d0f819c55d76f0cb7a54ba66d4368d40385bd +Author: Mark Crichton +Date: Thu Sep 19 18:30:56 2002 +0000 + + Beginning to add support for Solaris. Tested on Solaris 9. + + Shared handles are still not working, will be addressed soon. + + Trampoline code still broken, expect a rewrite. + + svn path=/trunk/mono/; revision=7622 + +commit 13eb9f4ebf45ffe17d555458cec8bbecefc71849 +Author: Radek Doulik +Date: Wed Aug 28 15:26:29 2002 +0000 + + retval value type fixed + + svn path=/trunk/mono/; revision=7127 + +commit 63315827a2ebc424954f4b8baf40497a5600ce7a +Author: Radek Doulik +Date: Wed Aug 28 14:41:08 2002 +0000 + + fixed valuetypes marshaling in delegates + + svn path=/trunk/mono/; revision=7126 + +commit 82d4a3ff22ea8e8dfb9a3ec2be10657e7e25cd97 +Author: Radek Doulik +Date: Sat Aug 24 23:54:12 2002 +0000 + + fixed struct marshaling, 108 tests pass now + + svn path=/trunk/mono/; revision=7013 + +commit b94511c33193dc728e039fa776bf3b9d5dad4e5b +Author: Radek Doulik +Date: Wed Aug 21 17:47:34 2002 +0000 + + fixed delegates + + svn path=/trunk/mono/; revision=6862 + +commit fafa1892b8b0315cab29de09f09f2aa5041b61a7 +Author: Mark Crichton +Date: Tue Aug 20 15:03:07 2002 +0000 + + This nearly completes SPARC trampoline support for mint/mono. The delegate + code still needs some work. + + There are bugs. Send crash reports, as well as .cs code and exe's to + crichton@gimp.org + + Also, if anyone gets Bus Errors in the code, let me know as well, I've been + hunting down alignment bugs as well. 
+ + svn path=/trunk/mono/; revision=6812 + +commit f8f8b65c484f48436941e4985cfb4b837cff4ceb +Author: Paolo Molaro +Date: Mon Aug 5 17:28:10 2002 +0000 + + Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix random memory read in mono_create_method_pointer. + + svn path=/trunk/mono/; revision=6436 + +commit dc11862f43a6240bcc35d2ef96fb04750c4bf930 +Author: Sergey Chaban +Date: Mon Aug 5 16:43:06 2002 +0000 + + x86-codegen.h: fixed bug in x86_memindex_emit, for basereg == EBP && disp == imm32; + + svn path=/trunk/mono/; revision=6433 + +commit 60179dd8c27bf3c080ca2c7db818c01a51c9d4b1 +Author: Dietmar Maurer +Date: Mon Aug 5 09:53:43 2002 +0000 + + 2002-08-05 Dietmar Maurer + + * x86/tramp.c (mono_create_trampoline): fixed stack_size bug + + svn path=/trunk/mono/; revision=6408 + +commit e13f4a98c6fe61ec768b0da9d8832814a313ed78 +Author: Radek Doulik +Date: Fri Aug 2 18:34:20 2002 +0000 + + more WIP + + svn path=/trunk/mono/; revision=6363 + +commit f73afba7e99de872e4e9d9dcf3c7c483632f6bc6 +Author: Radek Doulik +Date: Fri Aug 2 18:13:59 2002 +0000 + + more surgery + + svn path=/trunk/mono/; revision=6360 + +commit 347f6a854167fa5a26484b83736de86f5ffd8ea0 +Author: Radek Doulik +Date: Fri Aug 2 17:55:44 2002 +0000 + + did quick surgery to update for Dietmar's new code + + svn path=/trunk/mono/; revision=6359 + +commit cc4396df6db395836340d26ad2f2d920f946729f +Author: Dietmar Maurer +Date: Fri Aug 2 07:13:54 2002 +0000 + + 2002-08-02 Dietmar Maurer + + * marshal.c (mono_delegate_to_ftnptr): pass delegate->target + instead of the delegate itself as this pointer (bug #28383) + + svn path=/trunk/mono/; revision=6348 + +commit fbb833e1937ec3e3183bd1219e0f2391faa62718 +Author: Dietmar Maurer +Date: Thu Aug 1 14:17:18 2002 +0000 + + 2002-08-01 Dietmar Maurer + + * x86/tramp.c (mono_create_trampoline): also push the value type pointer for + methods returning value types. + (mono_create_method_pointer): support valuetype returns. + + * interp.c (ves_pinvoke_method): do not call stackval_from_data if the result + is a value type. + + svn path=/trunk/mono/; revision=6311 + +commit 27a4251f2a6fd091ddc8084ad14a8808c136431d +Author: Dietmar Maurer +Date: Thu Aug 1 06:40:11 2002 +0000 + + 2002-08-01 Dietmar Maurer + + * interp.c (stackval_from_data): add pinvoke argument + (stackval_to_data): add pinvoke argument. We need consider the + fact that unmanages structures may have different sizes. + + * x86/tramp.c (mono_create_method_pointer): allocate space for + value types. + + svn path=/trunk/mono/; revision=6308 + +commit 1be0ee94a17d2a4b7edb513d845d88ba5fed8285 +Author: Dietmar Maurer +Date: Wed Jul 31 11:53:19 2002 +0000 + + 2002-07-31 Dietmar Maurer + + * x86/tramp.c: (mono_create_method_pointer): return method->addr for pinvoke methods + + * interp.c (ves_exec_method): bug fix - directly jump to handle_exception. + + svn path=/trunk/mono/; revision=6280 + +commit 87f9fd554284e9d2037c8757a4211cf710a85ac0 +Author: Dietmar Maurer +Date: Wed Jul 31 11:00:53 2002 +0000 + + 2002-07-31 Dietmar Maurer + + * interp.c: use the new marshaling code. better delegate/remoting + support. + + * debug-helpers.c (mono_method_full_name): only print a number to + indicate wrapper type (so that the output is more readable in traces). + + * x86/tramp.c: remove code to handle PInvoke because this is no + longer needed. 
+ + svn path=/trunk/mono/; revision=6278 + +commit ebf4ad275e84a3887798ac765bdc1f0ed457cd5a +Author: Paolo Molaro +Date: Fri Jul 19 12:21:01 2002 +0000 + + Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix float loads. Simple delegate marshaling fix. + + svn path=/trunk/mono/; revision=5909 + +commit 2b677a332d7e811ca9cc75d271d069787f0495c1 +Author: Radek Doulik +Date: Mon Jul 8 16:13:36 2002 +0000 + + 2002-07-08 Radek Doulik + + * ppc/tramp.c: marshaling for SZARRAY + + svn path=/trunk/mono/; revision=5650 + +commit ef9afb744f4679c465be380b4285928fff50db5e +Author: Radek Doulik +Date: Sat Jul 6 01:41:14 2002 +0000 + + 2002-07-05 Radek Doulik + + * ppc/tramp.c: removed magic hack + + svn path=/trunk/mono/; revision=5614 + +commit 02476784232f22f91e347750c3fb8018d770d057 +Author: Paolo Molaro +Date: Tue Jun 18 04:38:23 2002 +0000 + + Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro + + * x86/tramp.c: marshal simple arrays correctly. + + svn path=/trunk/mono/; revision=5316 + +commit 5ff6eebba3bc5e1662b84a34a276d6842e41ab87 +Author: Paolo Molaro +Date: Sat Jun 1 08:08:34 2002 +0000 + + Kill warning. + + svn path=/trunk/mono/; revision=5075 + +commit 0c268fdddc804751bba57401c02b139368f7a01c +Author: Paolo Molaro +Date: Fri May 31 10:55:37 2002 +0000 + + Compilation fixes. + + svn path=/trunk/mono/; revision=5054 + +commit 9fe623bf5c85da9328f895680d8688987a94427e +Author: Dietmar Maurer +Date: Thu May 30 11:04:53 2002 +0000 + + 2002-05-30 Dietmar Maurer + + * x86.brg (reg): bug fix in LOCALLOC + + * mono.c (main): new switch --nointrinsic to disable memcpy opt. + + * x86.brg: added block copy/init optimizations from + Serge (serge@wildwestsoftware.com) + + svn path=/trunk/mono/; revision=5025 + +commit 1b8d1ed7ce3e489dcf53cc2369a3d6d482d5901d +Author: Dietmar Maurer +Date: Tue May 28 12:23:00 2002 +0000 + + 2002-05-28 Dietmar Maurer + + * x86.brg: impl. CKFINITE + + svn path=/trunk/mono/; revision=4988 + +commit b0826d366f4f32c6ef772c0a9deef5a9b4157f0b +Author: Miguel de Icaza +Date: Mon May 27 22:56:15 2002 +0000 + + Updated copyright headers to the standard template + + svn path=/trunk/mono/; revision=4975 + +commit 027755140cf39776018e520f7cd838e319fb9a34 +Author: Dietmar Maurer +Date: Thu May 23 07:44:00 2002 +0000 + + 2002-05-23 Dietmar Maurer + + * delegate.c: move the thread pool to metadata/threadpool.c, code + cleanup. + + * threadpool.[ch]: impl. a threadpool that can + be used by mint and mono. 
+ + svn path=/trunk/mono/; revision=4875 + +commit be70e94a20c2c1864f829122085bce03f24cc4e8 +Author: Radek Doulik +Date: Wed May 15 14:19:24 2002 +0000 + + fixed delegates return values + + svn path=/trunk/mono/; revision=4662 + +commit 89d436d12d5746d04d9f27d9897853f846d0500e +Author: Radek Doulik +Date: Mon May 13 19:00:42 2002 +0000 + + 2002-05-13 Radek Doulik + + * ppc/tramp.c (emit_save_parameters): fix I8 parameters + + svn path=/trunk/mono/; revision=4601 + +commit 8e8d0cf9ac1f4aa46da775bed8da214581345ddb +Author: Radek Doulik +Date: Mon May 13 17:24:04 2002 +0000 + + introduced DEBUG, disabled by default + + svn path=/trunk/mono/; revision=4599 + +commit 8d20a830d50aaf3f30869283332d654472f16890 +Author: Sergey Chaban +Date: Fri May 10 19:25:15 2002 +0000 + + * x86-codegen.h: renamed FP int macro for consistency (its arg is really a membase, not mem); + + svn path=/trunk/mono/; revision=4500 + +commit 9fb095d7866ee9963f11e3bd2dcc9b9930320ddc +Author: Radek Doulik +Date: Fri May 10 13:39:09 2002 +0000 + + updated for new strings + + svn path=/trunk/mono/; revision=4484 + +commit 5d0a1992c7fe0252457f6644198654d06ee7a19f +Author: Paolo Molaro +Date: Fri May 10 07:24:08 2002 +0000 + + Fix checks in x86_patch(). + + svn path=/trunk/mono/; revision=4473 + +commit 512203d918c6998f9652d23301b553c2bb205788 +Author: Sergey Chaban +Date: Mon May 6 16:39:01 2002 +0000 + + Logged changes to x86-codegen.h + + svn path=/trunk/mono/; revision=4344 + +commit 9d1e2b5076d08bd02eb28ad8b3f2a27a42449250 +Author: Sergey Chaban +Date: Mon May 6 16:33:54 2002 +0000 + + * x86-codegen.h: added missing shifts; + 8-bit ALU operations; + FPU ops with integer operand; + FIST (without pop); + + svn path=/trunk/mono/; revision=4343 + +commit 944736b70eb0689f094fe05c7184d36f7b7421bf +Author: Paolo Molaro +Date: Fri May 3 12:52:19 2002 +0000 + + Added some missing FP opcodes and made x86_patch() handle also the call opcode. + + svn path=/trunk/mono/; revision=4252 + +commit d8cf0bf0270efb923d7c6e80c4e5d547d1161740 +Author: Paolo Molaro +Date: Mon Apr 29 12:14:39 2002 +0000 + + Removed mono_string_new_wrapper(). + + svn path=/trunk/mono/; revision=4151 + +commit cc03dca33b721c5b46cba47ff7a7bb80b820be6d +Author: Paolo Molaro +Date: Mon Apr 22 07:32:11 2002 +0000 + + Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro + + * x86/x86-codegen.h: added loop instructions and made x86_patch fully + useful. + + svn path=/trunk/mono/; revision=3950 + +commit ab877e78de2c3ac01664dc13c13c2f231fca4c11 +Author: Dietmar Maurer +Date: Sat Apr 20 14:32:46 2002 +0000 + + 2002-04-20 Dietmar Maurer + + * interp.c (ves_exec_method): support internalcall String constructors + + svn path=/trunk/mono/; revision=3925 + +commit d4ccb473cf835fd07294b7da6a6d4da9e2022dcd +Author: Paolo Molaro +Date: Wed Apr 10 12:34:16 2002 +0000 + + Forgot to commit. + + svn path=/trunk/mono/; revision=3740 + +commit 9116ce23467ea863a99b860849d867802c32187a +Author: Paolo Molaro +Date: Sat Apr 6 10:40:58 2002 +0000 + + Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro + + * x86/tramp.c: fix advancement od argument position on the stack. + + svn path=/trunk/mono/; revision=3652 + +commit bf0fa05ecc5f3537597c10704414544c50d3a0ed +Author: Paolo Molaro +Date: Thu Apr 4 04:42:46 2002 +0000 + + Remove useless comments in rules. 
+ + svn path=/trunk/mono/; revision=3595 + +commit 3f3f1e23c3cced2e37ec49361ee3236c524ed107 +Author: Dietmar Maurer +Date: Sat Mar 30 11:19:26 2002 +0000 + + fixed compiler warnings + + svn path=/trunk/mono/; revision=3514 + +commit 793cfcbae98d4847ff08aff44ffa27020260c317 +Author: Paolo Molaro +Date: Sat Mar 16 14:37:28 2002 +0000 + + Sat Mar 16 19:12:57 CET 2002 Paolo Molaro + + * x86/tramp.c: increase default allocated size for trampolines + and assert on overflow. + + svn path=/trunk/mono/; revision=3143 + +commit af361d9d30702937e3cd9412b987552f4652887a +Author: Dietmar Maurer +Date: Thu Mar 14 09:52:53 2002 +0000 + + 2002-03-14 Dietmar Maurer + + * emit-x86.c (arch_create_native_wrapper): new code to generate + wrappers for calling native functions. + + * icall.c (ves_icall_InternalInvoke): impl. + + svn path=/trunk/mono/; revision=3103 + +commit 670be867554bb6f1ed61a17649e21d0e25f66105 +Author: Paolo Molaro +Date: Mon Mar 11 11:24:33 2002 +0000 + + Mon Mar 11 16:14:29 CET 2002 Paolo Molaro + + * x86/x86-codegen.h: addex x86_clear_reg() and changed + x86_mov_reg_imm() to not check for imm == 0. + + svn path=/trunk/mono/; revision=3051 + +commit 51d24bbb570af055b885dfe9f06e7717e4bb3b98 +Author: Dietmar Maurer +Date: Thu Feb 28 09:35:29 2002 +0000 + + impl. more CONV opcodes + + svn path=/trunk/mono/; revision=2761 + +commit d0370e0ab841b63f60170f3afcae9ee49e9faade +Author: Paolo Molaro +Date: Thu Feb 28 07:43:49 2002 +0000 + + Thu Feb 28 12:34:21 CET 2002 Paolo Molaro + + * x86/tramp.c: start handling of more complex marshaling stuff. + + + Thu Feb 28 12:33:41 CET 2002 Paolo Molaro + + * marshal.c, marshal.h: start of marshaling interface. + + svn path=/trunk/mono/; revision=2759 + +commit 29f73f5799fb9274a44c918cb4f63c606f765b96 +Author: Sergey Chaban +Date: Wed Feb 27 09:12:27 2002 +0000 + + * Makefile.am: removed SCRIPT_SOURCES to fix automake issues. + + svn path=/trunk/mono/; revision=2710 + +commit a8b6a875977b2728019ea7cf2ea8dd432fe4469a +Author: Sergey Chaban +Date: Mon Feb 25 08:58:43 2002 +0000 + + * ChangeLog: ARM-related log entry. + + svn path=/trunk/mono/; revision=2628 + +commit f703ca24db3d380b37434e9f1cced6d0b45a5470 +Author: Sergey Chaban +Date: Mon Feb 25 08:56:57 2002 +0000 + + * Makefile.am: added arm to DIST_SUBDIRS. + + svn path=/trunk/mono/; revision=2627 + +commit f107fb14e6c183972bec81e5727381f44c6a5333 +Author: Radek Doulik +Date: Sun Feb 24 20:46:13 2002 +0000 + + (mono_create_method_pointer): implements delegates with parameters + and return value + + svn path=/trunk/mono/; revision=2618 + +commit 2217d1a7da2572afd033b958454b9662c42022b9 +Author: Sergey Chaban +Date: Sun Feb 24 17:44:55 2002 +0000 + + * ARM support sources, initial check-in; + + svn path=/trunk/mono/; revision=2615 + +commit 56dde5e20e11f2d9d2a3522923a5a4729bed469f +Author: Radek Doulik +Date: Sun Feb 24 01:40:17 2002 +0000 + + 2002-02-24 Radek Doulik + + * ppc/tramp.c (mono_create_method_pointer): basic delegates + implementation, it works for simple delegates now and I am already + pretty close to have it working for every delegates, but I am + going to sleep and finish it tomorrow? + + svn path=/trunk/mono/; revision=2611 + +commit 0c4f3b00c8e831077c6ba1b28065e7be81bbff61 +Author: Jeffrey Stedfast +Date: Fri Feb 22 19:43:09 2002 +0000 + + 2002-02-22 Jeffrey Stedfast + + * sparc/tramp.c (mono_create_trampoline): Much tinkering to get + the opcodes more correct. Still needs a lot of work. 
+ + svn path=/trunk/mono/; revision=2602 + +commit 6bb3f7ead4ab8d574273f5bdacf32b29809ace80 +Author: Radek Doulik +Date: Tue Feb 19 20:57:29 2002 +0000 + + ops, fix return value passing + + svn path=/trunk/mono/; revision=2526 + +commit 725e90ef0e13752e357358ddef152a30beae174f +Author: Radek Doulik +Date: Tue Feb 19 20:50:13 2002 +0000 + + added stack saving for most arguments + + svn path=/trunk/mono/; revision=2523 + +commit 5dbc4bd3639f2d012a1103ae1b0f911768e460ab +Author: Radek Doulik +Date: Tue Feb 19 19:49:10 2002 +0000 + + 2002-02-19 Radek Doulik + + * ppc/tramp.c (emit_save_parameters): don't start saving 64bit + values to + even registers + + svn path=/trunk/mono/; revision=2519 + +commit e756cc154586ebdd6f4bba8b730fca09611874cf +Author: Paolo Molaro +Date: Tue Feb 19 15:40:57 2002 +0000 + + Tue Feb 19 20:19:38 CET 2002 Paolo Molaro + + * x86/tramp.c: avoid pointer arthmetric (pointed out by Serge). + + + Tue Feb 19 20:20:15 CET 2002 Paolo Molaro + + * dump.c: the prolog is before each arg in the custom attribute blob. + + svn path=/trunk/mono/; revision=2513 + +commit 1da21d342a98bedfc9295846080043d8946f4029 +Author: Radek Doulik +Date: Sun Feb 17 21:10:29 2002 +0000 + + la la la, ChangeLog entries + + svn path=/trunk/mono/; revision=2463 + +commit b7fa0baa6c15d3ee14a1b67dd5b56d21a931894b +Author: Radek Doulik +Date: Sun Feb 17 20:02:39 2002 +0000 + + (mono_string_new_wrapper): new helper function, cut&pasted from + x86, modified to check for NULL text to avoid branching in + generated code + (calculate_sizes): updated for string retval changes + (emit_call_and_store_retval): updated for string retval + + svn path=/trunk/mono/; revision=2461 + +commit 2cee2566ae50aa32e13864135260e16fd21bfac1 +Author: Radek Doulik +Date: Sun Feb 17 19:41:12 2002 +0000 + + 2002-02-17 Radek Doulik + + * ppc/tramp.c: fixed minimal stack size, fixed string parameters, + fix byte and half word parameters + + * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth + + svn path=/trunk/mono/; revision=2460 + +commit c6fd0cb7010239a29091a50aa5354e96f74bedf2 +Author: Dietmar Maurer +Date: Wed Feb 13 12:22:52 2002 +0000 + + added some docu + + svn path=/trunk/mono/; revision=2372 + +commit 6b6716c9eaa66549c9c1cf86934a54a830afc1b6 +Author: Dietmar Maurer +Date: Wed Feb 13 08:29:02 2002 +0000 + + pass the domain to mono_string_new + + svn path=/trunk/mono/; revision=2365 + +commit 0ffc7e417ee15973120c4f3a0cb0f2732c5c6633 +Author: Miguel de Icaza +Date: Mon Feb 11 22:48:46 2002 +0000 + + More + + svn path=/trunk/mono/; revision=2341 + +commit 6f7cdfa857058ee3662e1662190315c294188ae0 +Author: Paolo Molaro +Date: Mon Feb 11 13:49:06 2002 +0000 + + Mon Feb 11 18:40:04 CET 2002 Paolo Molaro + + * sparc/*: sparc codegen header and some untested trampoline code. + + svn path=/trunk/mono/; revision=2315 + +commit d7a858a6ac5bc37435a157cf41eb63818905a7ea +Author: Paolo Molaro +Date: Mon Feb 11 07:42:10 2002 +0000 + + Mon Feb 11 12:32:35 CET 2002 Paolo Molaro + + * x86/tramp.c: fix handling of multiple marshaleed strings. + * x86/x86-codegen.h: some code to patch branch displacements. + + svn path=/trunk/mono/; revision=2308 + +commit dd029fa4245c99073ae6863dcb8e1560cc1eedc0 +Author: Dietmar Maurer +Date: Fri Feb 1 12:04:34 2002 +0000 + + SHR/SHL impl. + + svn path=/trunk/mono/; revision=2224 + +commit 4a977a50d70eb75760d9555854845d32595c4093 +Author: Paolo Molaro +Date: Fri Feb 1 11:22:35 2002 +0000 + + Fri Feb 1 16:03:53 CET 2002 Paolo Molaro + + * interp.c: exception fixes. 
Use mono_method_pointer_get () + to easy porting to other archs. Some support for overflow detection. + + Fri Feb 1 16:03:00 CET 2002 Paolo Molaro + + * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get (). + + + Fri Feb 1 16:13:20 CET 2002 Paolo Molaro + + * class.c: add asserts if we are ever going to scribble over memory. + * socket-io.c: not all systems have AF_IRDA defined. + + svn path=/trunk/mono/; revision=2223 + +commit 2d3dbc6213f3e12d1c7b332d80fec81384612bf8 +Author: Miguel de Icaza +Date: Thu Jan 24 01:00:53 2002 +0000 + + 2002-01-23 Miguel de Icaza + + * x86/tramp.c (mono_create_trampoline): Do not try to create a + mono_string_new if the return value from the PInvoke code is + NULL. + + 2002-01-23 Miguel de Icaza + + * genwrapper.pl: Added wrappers for the mono_glob functions. + + * glob.c: New file, with globing functions used by the Directory + code. + + svn path=/trunk/mono/; revision=2139 + +commit 5291c24b937d193ef9861c87421bab87e0fcc4da +Author: Radek Doulik +Date: Mon Jan 21 20:06:20 2002 +0000 + + ppc changes + + svn path=/trunk/mono/; revision=2090 + +commit b5472227702fc528149111f0c4406c9dadb9a9e0 +Author: Paolo Molaro +Date: Mon Jan 14 07:00:24 2002 +0000 + + Mon Jan 14 11:50:16 CET 2002 Paolo Molaro + + * x86/x86-codegen.c: added overflow condition code and some aliases + for the other ccs. + + svn path=/trunk/mono/; revision=1968 + +commit a18abcd00665e9bc660b90cf4c0bdf86456067af +Author: Paolo Molaro +Date: Thu Jan 10 16:13:26 2002 +0000 + + Thu Jan 10 19:36:27 CET 2002 Paolo Molaro + + * class.c: fix mono_class_from_mono_type () for szarray types. + Remove unused cache check in mono_class_from_type_spec(). + * icall.c: *type_from_name () functions handle simple arrays and byref. + * reflection.c: handle byref and szarray types. Handle methods without + body (gets P/Invoke compilation working). Handle types and fields in + get_token (). + * reflection.h: add rank to MonoTypeInfo. + + + Thu Jan 10 20:59:59 CET 2002 Paolo Molaro + + * interp.c, interp.h: add a flag to mono_create_trampoline () + to handle runtime methods. + + + Thu Jan 10 21:01:08 CET 2002 Paolo Molaro + + * x86/tramp.c: mono_create_trampoline (): the runtime argument is + needed to handle correctly delegates, the previous change in handling + the string return type broke them. + + svn path=/trunk/mono/; revision=1950 + +commit 66990d65e3ac907fe24cc5411591759ce60472b0 +Author: Matt Kimball +Date: Wed Jan 9 01:49:12 2002 +0000 + + Tue Jan 8 22:38:41 MST 2002 Matt Kimball + + * x86/tramp.c: handle strings returned from functions in external + libraries by converting to a Mono string object after the pinvoke'd + function returns + + svn path=/trunk/mono/; revision=1923 + +commit ba9f9e77bf38e3bb4b1a888d39c7b0aab8ae09bf +Author: Paolo Molaro +Date: Sat Jan 5 11:15:42 2002 +0000 + + Sat Jan 5 15:48:04 CET 2002 Paolo Molaro + + * icall.c: hack to make IsSubType work for TypeBuilders. + * reflection.c: emit constructors before methods. + Retrieve param names in mono_param_get_objects(). + + + Sat Jan 5 15:45:14 CET 2002 Paolo Molaro + + * interp.c: allow classname:method name in --debug argument. + Fix box opcode for valuetypes. Fix a few opcode to take a 16 bit + index instead of 32 (stloc, ldloc, starg, etc.). + + + Sat Jan 5 15:51:06 CET 2002 Paolo Molaro + + * x86/tramp.c: handle short integer return types. 
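Handling a short integer return type, as in the entry above, comes down to widening AX after the native call so the interpreter sees a full 32-bit value. A minimal sketch, assuming the x86_widen_reg macro from x86-codegen.h (movsx vs. movzx selected by is_signed, a 16-bit source marked by the final flag):

    #include "x86-codegen.h"

    /* Sketch: the callee returned a 16-bit value in AX; sign- or
     * zero-extend it into EAX. The trailing 1 marks a 16-bit
     * ("half") source rather than an 8-bit one. */
    static unsigned char *
    widen_short_retval (unsigned char *p, int is_signed)
    {
            x86_widen_reg (p, X86_EAX, X86_EAX, is_signed, 1);
            return p;
    }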
+ + svn path=/trunk/mono/; revision=1852 + +commit 0635ffef0b38bcf88cd3320939c1d96bf8bb8c0e +Author: Miguel de Icaza +Date: Thu Jan 3 20:13:47 2002 +0000 + + Fix build for new automakes, seems to work + + svn path=/trunk/mono/; revision=1795 + +commit 054ebda213a85e3a8a1770ec5e63831e3a0f06ba +Author: Paolo Molaro +Date: Thu Dec 20 15:20:42 2001 +0000 + + Thu Dec 20 20:13:07 CET 2001 Paolo Molaro + + * x86/tramp.c: fix create_method_pointer() to pass the arguments + correctly and add check for overflow. + + svn path=/trunk/mono/; revision=1656 + +commit faaadc7132a2cdd8c13adf7fbb79d32461759493 +Author: Dietmar Maurer +Date: Mon Dec 17 06:50:02 2001 +0000 + + 2001-12-16 Dietmar Maurer + + * emit-x86.c (arch_handle_exception): new code to handle + exceptions inside unmanaged code. + + * x86.brg: impl. SAVE_LMF, RESTORE_LMF, pass implizit valuetype + address as first argument. + + * x86.brg: pass exceptions on the stack + + * jit.h (ISSTRUCT): new macro to check for real value types + (return false for enum types). + + * unicode.c (_wapi_unicode_to_utf8): byteswap UTF16 strings before + passing them to iconv + + * file-io.c: raise exceptions if handle is invalid. + + svn path=/trunk/mono/; revision=1603 + +commit 35430229b14448182d84a7f9348995019251fb28 +Author: Paolo Molaro +Date: Thu Dec 13 11:03:21 2001 +0000 + + Thu Dec 13 15:56:53 CET 2001 Paolo Molaro + + * x86/x86-codegen.h: x86_mov_memindex_imm() added. + + svn path=/trunk/mono/; revision=1565 + +commit 813f9d5a9dcbe48c711bbb8bacc876e976ce0aea +Author: Radek Doulik +Date: Thu Nov 29 21:23:53 2001 +0000 + + 2001-11-29 Radek Doulik + + * ppc/tramp.c: use r12 which is volatile instead of non-volatile + r14 to avoid saving + + svn path=/trunk/mono/; revision=1482 + +commit 0a65eb2cf0b69f68849e7196b6e00133b3ecf3fc +Author: Radek Doulik +Date: Thu Nov 29 20:19:00 2001 +0000 + + 2001-11-29 Radek Doulik + + * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS + generate libmonoarch for ppc + + svn path=/trunk/mono/; revision=1478 + +commit c4f49a88d52479062bd8b95669cb90c1b86242d0 +Author: Radek Doulik +Date: Thu Nov 29 19:32:48 2001 +0000 + + added test + + svn path=/trunk/mono/; revision=1477 + +commit 2c1c4889b99aaf4be0b894ea24b4d92201cb282d +Author: Radek Doulik +Date: Thu Nov 29 19:32:19 2001 +0000 + + added files for initial ppc support + + svn path=/trunk/mono/; revision=1476 + +commit 719926a4c59c399767f10b9567859300a768b05a +Author: Paolo Molaro +Date: Tue Nov 27 10:30:39 2001 +0000 + + Tue Nov 27 15:24:07 CET 2001 Paolo Molaro + + * x96/x86-codegen.c: x86_lea_memindex() added. + + svn path=/trunk/mono/; revision=1447 + +commit c4a26e54cfa29ea5279d1964ef4ea7f6176c0357 +Author: Paolo Molaro +Date: Mon Nov 19 06:52:53 2001 +0000 + + Mon Nov 19 11:37:14 CET 2001 Paolo Molaro + + * class.c, class.h: add mono_install_trampoline() so that the runtime + can register a function to create a trampoline: removes the ugly + requirement that a runtime needed to export arch_create_jit_trampoline. + * object.h, object.c: added mono_install_handler() so that the runtime + can install an handler for exceptions generated in C code (with + mono_raise_exception()). Added C struct for System.Delegate. + * pedump.c: removed arch_create_jit_trampoline. + * reflection.c: some cleanups to allow registering user strings and + later getting a token for methodrefs and fieldrefs before the assembly + is built. + * row-indexes.h: updates and fixes from the new ECMA specs. 
+ + + Mon Nov 19 11:36:22 CET 2001 Paolo Molaro + + * jit.c: use mono_install_trampoline (), instead of exporting + a function to a lower-level library. + + + Mon Nov 19 11:33:00 CET 2001 Paolo Molaro + + * interp.c: start adding support for handling exceptions across + managed/unmanaged boundaries. Cleanup Delegate method invocation. + Pass the correct target object in Delegate::Invoke and use the correct + 'this' pointer in ldvirtftn (bugs pointed out by Dietmar). + + Mon Nov 19 11:32:28 CET 2001 Paolo Molaro + + * main.c: remove arch_create_jit_trampoline(). + + svn path=/trunk/mono/; revision=1380 + +commit af643d34335bfdc90a7455f99847e954456bb07d +Author: Paolo Molaro +Date: Wed Nov 14 15:18:56 2001 +0000 + + Wed Nov 14 19:21:26 CET 2001 Paolo Molaro + + * x86/tramp.c: handle boolean as a return value. + * x96/x86-codegen.c: x86_widen_memindex() added. + + + Wed Nov 14 19:23:00 CET 2001 Paolo Molaro + + * interp.c: move the stack frame dumping code to a function so it can + be called from the debugger. Fix virtual method lookup for interfaces. + Throw exceptions instead of aborting in more places. + Print also the message in an exception. Updates for field renames in + corlib. + + + Wed Nov 14 19:26:06 CET 2001 Paolo Molaro + + * class.h, class.c: add a max_interface_id to MonoClass. + * icall.c: rename my_mono_new_object() to my_mono_new_mono_type() + since it's used to do that. Added mono_type_type_from_obj(). + Make GetType() return NULL instead of segfaulting if the type was not + found. Handle simple arrays in assQualifiedName. + * object.h: add a struct to represent an Exception. + * reflection.c: output call convention in method signature. + Add code to support P/Invoke methods and fixed offsets for fields. + + svn path=/trunk/mono/; revision=1352 + +commit 041ab742894fbd6d90e2ffb3c6fddb60a869e952 +Author: Dietmar Maurer +Date: Fri Nov 9 13:40:43 2001 +0000 + + 2001-11-09 Dietmar Maurer + + * testjit.c (mono_analyze_stack): new BOX impl. + + * x86.brg: implemented INITOBJ + + * testjit.c (mono_analyze_stack): finished array support + (mono_analyze_stack): reimplemented DUP instruction + + svn path=/trunk/mono/; revision=1308 + +commit bff8e602354a8d32dfaed336600b5f648af06e70 +Author: Miguel de Icaza +Date: Thu Nov 8 21:38:32 2001 +0000 + + 2001-11-07 Miguel de Icaza + + * x86/tramp.c: Include stdlib to kill warning. + + 2001-11-07 Miguel de Icaza + + * main.c (dis_property_methods): Added missing colon which avoided + setting loc.t + + 2001-11-07 Miguel de Icaza + + * interp.c: Include stdlib to kill warning. + (check_corlib): Adjust format encodings to remove warnings. + + 2001-11-07 Miguel de Icaza + + * reflection.c (build_compressed_metadata): Eliminates warnings + and uses 64-bit clean code. + + * metadata.c (mono_type_hash): Change signature to eliminate warnings. + (mono_type_equal): Change signature to eliminate warnings. + + 2001-11-07 Miguel de Icaza + + * monoburg.y: Include string.h, stdlib.h to kill warnings. + + * sample.brg: Include string.h to remove warnings. + + svn path=/trunk/mono/; revision=1298 + +commit 306ec85b780f5f9c99ffaf19f51baa6548a298a6 +Author: Dietmar Maurer +Date: Wed Nov 7 06:33:48 2001 +0000 + + 2001-11-07 Dietmar Maurer + + * emit-x86.c (enter_method): print out all method arguments + (x86_magic_trampoline): impl. + (arch_create_simple_jit_trampoline): we use different trampolines + for static methods (no need to write the address back into to + vtable). 
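The static-method trampoline remark above reflects the usual magic-trampoline pattern: a stub invokes the JIT once, patches the vtable slot when there is one, and execution then continues in the compiled code, so the stub is never hit again for that slot. A sketch of the flow with hypothetical names (jit_compile and TrampInfo are stand-ins, not the actual emit-x86.c interface):

    /* Illustrative magic-trampoline flow. */
    extern void *jit_compile (void *method);   /* hypothetical JIT entry */

    typedef struct {
            void **vtable_slot;   /* NULL for static methods */
            void  *method;
    } TrampInfo;

    static void *
    magic_trampoline (TrampInfo *info)
    {
            void *code = jit_compile (info->method);
            if (info->vtable_slot)
                    *info->vtable_slot = code;  /* patch: stub runs only once */
            return code;                        /* caller transfers here */
    }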
+ + svn path=/trunk/mono/; revision=1278 + +commit 689da148c801d119d0d2722ef74a497e95c5f1b3 +Author: Paolo Molaro +Date: Mon Oct 22 09:24:31 2001 +0000 + + Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro + + * x86/tramp.c: handle boolean, u1 and i1 as return values. + + svn path=/trunk/mono/; revision=1192 + +commit f6b50c3852378ca35cef63056ddec70585b3ac32 +Author: Paolo Molaro +Date: Wed Oct 10 10:11:17 2001 +0000 + + Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro + + * x86/x86-codegen.c: added x86_set_{reg,mem,membase}. + + svn path=/trunk/mono/; revision=1133 + +commit 27043fee95be8bec691045d7ab39b1be553550e9 +Author: Paolo Molaro +Date: Mon Oct 8 14:33:48 2001 +0000 + + Mon Oct 8 20:27:50 CEST 2001 Paolo Molaro + + * configure.in: define NO_UNALIGNED_ACCESS for platforms that + can't read on unaligned boundaries + + + Mon Oct 8 16:12:38 CEST 2001 Paolo Molaro + + * metadata.c, metadata.h: use MonoArrayType to describe the shape of an array. + Guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux). + * image.c: endian fixes by Laurent Rioux. + * object.h, object.c: rename MonoStringObject to MonoString and + MonoArrayObject to MonoArray. Change some function names to conform to + the style mono__. mono_string_new_utf16 () takes a + guint16* as first argument, so don't use char*. + Provide macros to do the interesting things on arrays in a portable way. + * threads-pthread.c: updates for the API changes and #include + (required for sched_yield()). + * icall.c: updates for the API changes above. + * Makefile.am, mono-endian.c. mono-endian.h: include unaligned read routines for + platforms that need them. + + + Mon Oct 8 16:13:55 CEST 2001 Paolo Molaro + + * get.c, get.h: MonoArray changed in MonoArrayType. + * main.c: guard against calling bsearch with a NULL pointer + (pointed out by Laurent Rioux, smoux). + + + Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro + + * x86/tramp.c: remove mono_get_ansi_string () and use + mono_string_to_utf8 () instead. + + + Mon Oct 8 16:14:40 CEST 2001 Paolo Molaro + + * interp.c: use the accessors provided in object.h to deal with + MonoArrays. Updates for API renames in metadata. Throw exception + in ldelema if index is out of bounds. + + svn path=/trunk/mono/; revision=1122 + +commit 4ff31b89c4d3458dc378cd2e915ed08281a21a8b +Author: Paolo Molaro +Date: Thu Oct 4 13:32:23 2001 +0000 + + Thu Oct 4 19:10:30 CEST 2001 Paolo Molaro + + * class.c: MonoTypes stored in MonoClass are stored as + fundamental MonoTypes when the class represents a + fundamental type (System.Int32, ...). + The TypeHandle return by ldtoken is a MonoType*. + * icall.c: ves_icall_get_data_chunk () write out all the + PE/COFF stuff. Implement ves_icall_define_method (), + ves_icall_set_method_body (), ves_icall_type_from_handle (). + * image.c: properly skip unknown streams. + * loader.h, loader.c: add type_class to mono_defaults. + * metadata.c, metadata.h: export compute_size () as + mono_metadata_compute_size () with a better interface. + Typo and C&P fixes. + * pedump.c: don't try to print the entry point RVA if there is no entry point. + * reflection.c, reflection.h: many cleanups, fixes, output method + signatures and headers, typedef and typeref info, compress the metadata + tables, output all the heap streams, cli header etc. + * row-indexes.h: typo fixes. + + + Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro + + * x86/tramp.c: allow marshalling valuetypes if they are + 4 bytes long. 
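The 4-byte restriction above works because such a valuetype fills exactly one 32-bit stack slot, so the trampoline can push it like a plain int; larger structs need a word-by-word copy. A minimal sketch using the header's macros (ptr_reg, holding the valuetype's address, is this example's assumption):

    #include "x86-codegen.h"

    /* Sketch: pass a 4-byte valuetype by value by pushing the one
     * word it occupies. */
    static unsigned char *
    push_small_valuetype (unsigned char *p, int ptr_reg)
    {
            x86_push_membase (p, ptr_reg, 0);
            return p;
    }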
+ + + Thu Oct 4 19:05:56 CEST 2001 Paolo Molaro + + * dis-cil.c: fix printing of exception stuff. + * dump.c: display some more info in the typedef table dump. + * main.c: typo fix and method list fix. + + svn path=/trunk/mono/; revision=1071 + +commit 7328e9088acbd2609dff8d07b841c3fafd894d25 +Author: Paolo Molaro +Date: Mon Oct 1 13:07:53 2001 +0000 + + Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro + + * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment + and avoid a couple of unnecessary instructions. + + svn path=/trunk/mono/; revision=1042 + +commit 1fa26f9aa718559d3090d1c1275bf04d574368f0 +Author: Paolo Molaro +Date: Fri Sep 28 13:49:47 2001 +0000 + + Fri Sep 28 19:26:30 CEST 2001 Paolo Molaro + + * metadata.c: fix type comparison for arrays. + * loader.h, loader.c: half-assed fix to get more tests work in cygwin. + Added a couple of new classes to monodefaults. + * icall.c: added a couple of Reflection-related internalcalls. + * class.h, class.c: implemented mono_ldtoken () for RuntimeTypeHandles. + Added a byval_arg MonoType to MonoClass. + + + Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro + + * x86/tramp.c: marshal valuetypes that are enums. + + + Fri Sep 28 19:37:46 CEST 2001 Paolo Molaro + + * interp.c: Implemented ldtoken, conv.ovf.i. Use MonoClass->byval_arg + (and remove related kludges). Don't choke on access to arrays of + references. Throw an exception when an internalcall or P/Invoke + function don't have an implementation. Throw and EngineException + for unimplemented opcodes. + + svn path=/trunk/mono/; revision=1027 + +commit 0122a3ea04b06d1d51f2756e48f6392ccac1096d +Author: Paolo Molaro +Date: Thu Sep 27 09:38:19 2001 +0000 + + Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG + as basereg. + + svn path=/trunk/mono/; revision=995 + +commit a5844f903a68e9448d7031587ffbd02ed2c4f486 +Author: Paolo Molaro +Date: Wed Sep 26 10:33:18 2001 +0000 + + Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: added memindex addressing mode encoding + (and mov to/from register opcodes). + + svn path=/trunk/mono/; revision=984 + +commit 1f45df6d593cd60780ea121d08ddd035a3418e4a +Author: Paolo Molaro +Date: Mon Sep 24 13:30:32 2001 +0000 + + Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro + + * x86/tramp.c: don't change a MONO_TYPE_STRING to a char* + when it's an argument to an internalcall. + + + Mon Sep 24 18:56:59 CEST 2001 Paolo Molaro + + * object.c, object.h: added mono_ldstr (), mono_string_is_interned () and + mono_string_intern () to implement the semantics of the ldstr opcode + and the interning of System.Strings. + * icall.c: provide hooks to make String::IsIntern and String::Intern + internalcalls. + + + Mon Sep 24 18:50:25 CEST 2001 Paolo Molaro + + * interp.c: catch a few more error conditions with exceptions instead of + erroring out. + Don't use g_print() in stack traces because it doesn't work with + some float values. + When we call an instance method of a valuetype class, unbox the 'this' + argument if it is an object. + Use mono_ldstr () to implement the ldstr opcode: it takes care of + interning the string if necessary. + Implemented new opcodes: ckfinite, cgt.un, clt.un, ldvirtftn, ldarga. + Fixes to handle NaNs when comparing doubles. + Make sure the loaded assembly has an entry point defined. + Fixed portability bugs in neg and not opcodes. 
+ + svn path=/trunk/mono/; revision=943 + +commit a995bd527db97e45d979a6b97e0a15a479d2e14b +Author: Paolo Molaro +Date: Sun Sep 23 07:49:26 2001 +0000 + + Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro + + * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines. + + svn path=/trunk/mono/; revision=927 + +commit c9d21b14c718c8e7f3690f5d93ac349bbdd98d88 +Author: Dietmar Maurer +Date: Fri Sep 21 12:50:46 2001 +0000 + + implemented more opcodes + + svn path=/trunk/mono/; revision=916 + +commit a0930b7dcd7fe845e1c3c06f3fba6736f88d8bf9 +Author: Paolo Molaro +Date: Thu Sep 20 15:31:50 2001 +0000 + + Thu Sep 20 16:32:42 CEST 2001 Paolo Molaro + + * interp.c: implemented some more opcodes: calli, rem.un, + shr.un, conv.u, cpobj, stobj, conv.r.un, conv.ovf.i1.un, + conv.ovf.i2.un, conv.ovf.i4.un, conv.ovf.i8.un, conv.ovf.i.un, + conv.ovf.u1.un, conv.ovf.u2.un, conv.ovf.u4.un, conv.ovf.u8.un, + conv.ovf.u.un. + Fix some 64 bit issues in the array element access code and a small bug. + Throw an exception on index out of range instead of asserting. + Throw an exception on a NULL array instead of dying. + Stomped a memory corruption bug (.cctor methods were freed after + executing them, but they are stored in MonoClass now...). + Added a simple facility to invoke the debugger when a named + function is entered (use the cmdline option --debug method_name). + * interp.h: fix 64 bit issue. + + svn path=/trunk/mono/; revision=904 + +commit e177e60b93378860f0573f458d06cd641770a255 +Author: Paolo Molaro +Date: Tue Sep 18 07:26:43 2001 +0000 + + Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: remove C++ comments. + + svn path=/trunk/mono/; revision=865 + +commit 4f874ee6ae2442c99421087b5ad11eae88283d55 +Author: Dietmar Maurer +Date: Mon Sep 17 09:10:44 2001 +0000 + + 2001-09-17 Dietmar Maurer + + * x86.brg: emit real code for calls + + * testjit.c (create_jit_trampoline): creates a function to trigger jit + compilation. + (mono_compile_method): reversed argument order + + svn path=/trunk/mono/; revision=842 + +commit 011e42b68518f5c1397ecdc0417c021b4c524560 +Author: Dietmar Maurer +Date: Mon Sep 17 07:18:11 2001 +0000 + + 2001-09-17 Dietmar Maurer + + * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest + + svn path=/trunk/mono/; revision=841 + +commit c61474703f058c226a94ba9cdfb1d19e3a45eecd +Author: Dietmar Maurer +Date: Wed Sep 12 03:47:43 2001 +0000 + + *** empty log message *** + + svn path=/trunk/mono/; revision=792 + +commit db78bf2c09f07356fe4c8284d1a48fa9867bd2fc +Author: Paolo Molaro +Date: Mon Sep 10 14:26:02 2001 +0000 + + Mon Sep 10 20:19:00 CEST 2001 Paolo Molaro + + * configure.in: check for sizeof(void*) and for the architecture. + + Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro + + * Makefile.am, x86/Makefile.am: conditional compile logic + to make porting to different targets easier. + + Mon Sep 10 17:24:45 CEST 2001 Paolo Molaro + + * Makefile.am: make it work for make distcheck. + + Mon Sep 10 20:21:34 CEST 2001 Paolo Molaro + + * endian.h, assembly.c: fix some endianness issues. + + Mon Sep 10 20:20:36 CEST 2001 Paolo Molaro + + * interp.c: endian fixes, comments.
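The conv.ovf.*.un opcodes listed above all share one shape: range-check, then narrow. A rough sketch for conv.ovf.i4.un, where interp_throw_overflow is a stand-in for whatever exception hook the interpreter uses (not a real function):

#include <stdint.h>

extern void interp_throw_overflow (void); /* hypothetical exception hook */

/* Narrow an unsigned 64-bit stack value to int32, signalling
 * OverflowException semantics when the value does not fit. */
static int32_t
conv_ovf_i4_un (uint64_t v)
{
	if (v > (uint64_t) INT32_MAX)
		interp_throw_overflow ();
	return (int32_t) v;
}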
+ + svn path=/trunk/mono/; revision=783 + +commit ce34fcec9c53a31ba2cd48f22c9a5099d02779e5 +Author: Dietmar Maurer +Date: Mon Sep 10 09:34:11 2001 +0000 + + *** empty log message *** + + svn path=/trunk/mono/; revision=781 + +commit 6c07667b555ca78bdad5d7b6e5aa87f8078c1989 +Author: Dietmar Maurer +Date: Mon Sep 10 09:14:46 2001 +0000 + + added the jit prototype, small fixes + + svn path=/trunk/mono/; revision=780 + +commit 680963c46ae8b96cca52387e0f5b1a2e39825b90 +Author: Paolo Molaro +Date: Fri Sep 7 12:53:34 2001 +0000 + + Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fixes and x86_mov_membase_imm (). + * x86/tramp.c: implemented mono_create_method_pointer (): + creates a native pointer to a method implementation that can be + used as a normal C callback. + + + Fri Sep 7 18:45:38 CEST 2001 Paolo Molaro + + * interp.c, interp.h: make ves_exec_method () and stackval_from_data () + non-static. Implement a couple of runtime methods needed to + use delegates (ves_runtime_method ()). + Implemented ldftn opcode. + + svn path=/trunk/mono/; revision=745 + +commit 4c39a186f2fa0dc3cca3ae6f6dc6584c75341adf +Author: Paolo Molaro +Date: Thu Sep 6 09:46:03 2001 +0000 + + Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: added x86_rdtsc() and fixes. + * x86/tramp.c: create trampolines to call pinvoke methods. + * x86/Makefile.am: create a libmonoarch convenience library. + + + Thu Sep 6 15:41:24 CEST 2001 Paolo Molaro + + * Makefile.am: link to libmonoarch. + * interp.h, interp.c: use mono_create_trampoline (). + Pass the command line arguments to Main (String[]) methods. + + svn path=/trunk/mono/; revision=728 + +commit d3a5cf739f1182a42d20f1d5ace2a272307da87f +Author: Paolo Molaro +Date: Mon Aug 27 03:43:09 2001 +0000 + + Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added. + + svn path=/trunk/mono/; revision=636 + +commit 231c25bd596aa45a2962a9c820fc9417985a1f3f +Author: Paolo Molaro +Date: Sat Aug 18 06:55:29 2001 +0000 + + Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit(). + + Sat Aug 18 12:42:26 CEST 2001 Paolo Molaro + + * class.c, class.h: also load the methods when loading a class. + + Sat Aug 18 12:43:38 CEST 2001 Paolo Molaro + + * interp.c, interp.h: added support code to create exceptions. + Changed the internal calling convention over to MonoInvocation, to support + exceptions, walking the stack backward and forward and passing the 'this' + pointer separately (removes the kludges previously required to pass this on the + stack). Use alloca heavily for both local vars and a copy of the incoming + arguments. Init local vars to zero. + Simplify stackval_from_data() and stackval_to_data() to only take a pointer + instead of pointer + offset. + Implement a few exception-related opcodes and the code to run finally, fault and + catch blocks, as well as printing a stack trace if no handler is found. + + Sat Aug 18 12:51:28 CEST 2001 Paolo Molaro + + * metadata.c, metadata.h: in the signature and method header store + only the space required for holding the local vars and incoming arguments. + + svn path=/trunk/mono/; revision=493 + +commit 75cdbf5cd16480631ac8579c2c2f230761e4802b +Author: Paolo Molaro +Date: Wed Aug 8 17:21:29 2001 +0000 + + Fixed x86_mov_reg_imm().
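The trampoline work above follows one pattern: emit a few instructions into a buffer with the x86-codegen.h macros, then call the buffer as a C function. A minimal sketch (make_ret42 is illustrative, not part of the library, and obtaining executable memory is elided):

#include "x86-codegen.h"

typedef int (*ret42_fn) (void);

/* Emit "mov $42, %eax; ret" into buf and hand it back as a function
 * pointer. buf must already be executable memory (e.g. from mmap). */
static ret42_fn
make_ret42 (unsigned char *buf)
{
	unsigned char *p = buf;

	x86_mov_reg_imm (p, X86_EAX, 42);
	x86_ret (p);
	return (ret42_fn) buf;
}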
+ + svn path=/trunk/mono/; revision=441 + +commit 5263eb4d219b8054b29a4d250cec40a7c8170a84 +Author: Miguel de Icaza +Date: Wed Aug 8 16:48:32 2001 +0000 + + Update copyright + + svn path=/trunk/mono/; revision=440 + +commit c9397770c008d427da0b7ad058782fc8564c10d3 +Author: Paolo Molaro +Date: Wed Aug 8 13:32:23 2001 +0000 + + Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro + + * x86/x86-codegen.h, x86/test.c: added x86 code emitter with + test. + + svn path=/trunk/mono/; revision=435 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..cb4a84d --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2001, 2002, 2003 Ximian, Inc and the individuals listed +on the ChangeLog entries. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README b/README new file mode 100644 index 0000000..cfed57d --- /dev/null +++ b/README @@ -0,0 +1,7 @@ +mono_arch +========= + +Part of Mono project, https://github.com/mono + +These are C macros that are useful when generating native code on various platforms. +This code is MIT X11 licensed. 
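Everything in the amd64 header below hangs off the REX prefix, so it is worth seeing how its four bits combine. A stand-alone sketch (rex_for mirrors the logic of the header's amd64_emit_rex macro but is not part of the API):

#include <stdio.h>

enum { REX_B = 1, REX_X = 2, REX_R = 4, REX_W = 8 };

/* A REX prefix is 0x40 | bits: W selects 64-bit operands, while R, X
 * and B extend the ModRM reg field, the SIB index register and the
 * ModRM rm/base register to reach r8-r15. */
static unsigned char
rex_for (int width, int modrm_reg, int index_reg, int rm_reg)
{
	unsigned char bits = (width > 4 ? REX_W : 0)
	                   | (modrm_reg > 7 ? REX_R : 0)
	                   | (index_reg > 7 ? REX_X : 0)
	                   | (rm_reg > 7 ? REX_B : 0);
	return (unsigned char) (0x40 | bits);
}

int
main (void)
{
	/* mov %rax,%r8 encodes as 49 89 c0: REX.W for the 64-bit width
	 * plus REX.B because r8 sits in the ModRM rm field. */
	printf ("%#x\n", rex_for (8, 0, 0, 8)); /* prints 0x49 */
	return 0;
}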
-- cgit v1.1 From 9074aa9c54ace5aa1abef6fa7187ac62f41231ba Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 12:13:23 +0200 Subject: Rename folder amd64 to x64 --- amd64/.gitignore | 4 - amd64/Makefile.am | 2 - amd64/amd64-codegen.h | 1835 ------------------------------------------------- x64/.gitignore | 4 + x64/Makefile.am | 2 + x64/amd64-codegen.h | 1835 +++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 1841 insertions(+), 1841 deletions(-) delete mode 100644 amd64/.gitignore delete mode 100644 amd64/Makefile.am delete mode 100644 amd64/amd64-codegen.h create mode 100644 x64/.gitignore create mode 100644 x64/Makefile.am create mode 100644 x64/amd64-codegen.h diff --git a/amd64/.gitignore b/amd64/.gitignore deleted file mode 100644 index 6930f61..0000000 --- a/amd64/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/Makefile.in -/Makefile -/.deps -/.libs diff --git a/amd64/Makefile.am b/amd64/Makefile.am deleted file mode 100644 index 47daaaf..0000000 --- a/amd64/Makefile.am +++ /dev/null @@ -1,2 +0,0 @@ -EXTRA_DIST = amd64-codegen.h - diff --git a/amd64/amd64-codegen.h b/amd64/amd64-codegen.h deleted file mode 100644 index 3c40d9d..0000000 --- a/amd64/amd64-codegen.h +++ /dev/null @@ -1,1835 +0,0 @@ -/* - * amd64-codegen.h: Macros for generating amd64 code - * - * Authors: - * Paolo Molaro (lupus@ximian.com) - * Intel Corporation (ORP Project) - * Sergey Chaban (serge@wildwestsoftware.com) - * Dietmar Maurer (dietmar@ximian.com) - * Patrik Torstensson - * Zalman Stern - * - * Copyright (C) 2000 Intel Corporation. All rights reserved. - * Copyright (C) 2001, 2002 Ximian, Inc. - */ - -#ifndef AMD64_H -#define AMD64_H - -#include <glib.h> - -typedef enum { - AMD64_RAX = 0, - AMD64_RCX = 1, - AMD64_RDX = 2, - AMD64_RBX = 3, - AMD64_RSP = 4, - AMD64_RBP = 5, - AMD64_RSI = 6, - AMD64_RDI = 7, - AMD64_R8 = 8, - AMD64_R9 = 9, - AMD64_R10 = 10, - AMD64_R11 = 11, - AMD64_R12 = 12, - AMD64_R13 = 13, - AMD64_R14 = 14, - AMD64_R15 = 15, - AMD64_RIP = 16, - AMD64_NREG -} AMD64_Reg_No; - -typedef enum { - AMD64_XMM0 = 0, - AMD64_XMM1 = 1, - AMD64_XMM2 = 2, - AMD64_XMM3 = 3, - AMD64_XMM4 = 4, - AMD64_XMM5 = 5, - AMD64_XMM6 = 6, - AMD64_XMM7 = 7, - AMD64_XMM8 = 8, - AMD64_XMM9 = 9, - AMD64_XMM10 = 10, - AMD64_XMM11 = 11, - AMD64_XMM12 = 12, - AMD64_XMM13 = 13, - AMD64_XMM14 = 14, - AMD64_XMM15 = 15, - AMD64_XMM_NREG = 16, -} AMD64_XMM_Reg_No; - -typedef enum -{ - AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */ - AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */ - AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */ - AMD64_REX_W = 8 /* Operation is 64 bits instead of 32 (default) or 16 (with 0x66 prefix) */ -} AMD64_REX_Bits; - -#if defined(__default_codegen__) - -#define amd64_codegen_pre(inst) -#define amd64_codegen_post(inst) - -#elif defined(__native_client_codegen__) - -#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre(); -#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start); - -/* Because of rex prefixes, etc, call sequences are not constant size. */ -/* These pre- and post-sequence hooks remedy this by aligning the call */ -/* sequence after we emit it, since we will know the exact size then. 
*/ -#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst); -#define amd64_call_sequence_post(inst) \ - (mono_nacl_align_call(&_code_start, &(inst)), _code_start); - -/* Native client can load/store using one of the following registers */ -/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */ -/* its upper 32 bits cleared and reference memory using r15 as the base. */ -#define amd64_is_valid_nacl_base(reg) \ - ((reg) == AMD64_RIP || (reg) == AMD64_R15 || \ - (reg) == AMD64_RBP || (reg) == AMD64_RSP) - -#endif /*__native_client_codegen__*/ - -#ifdef TARGET_WIN32 -#define AMD64_ARG_REG1 AMD64_RCX -#define AMD64_ARG_REG2 AMD64_RDX -#define AMD64_ARG_REG3 AMD64_R8 -#define AMD64_ARG_REG4 AMD64_R9 -#else -#define AMD64_ARG_REG1 AMD64_RDI -#define AMD64_ARG_REG2 AMD64_RSI -#define AMD64_ARG_REG3 AMD64_RDX -#define AMD64_ARG_REG4 AMD64_RCX -#endif - -#ifdef TARGET_WIN32 -#define AMD64_CALLEE_REGS ((1< 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } while (0) -#elif defined(__native_client_codegen__) -#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ - { \ - unsigned char _amd64_rex_bits = \ - (((width) > 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - amd64_nacl_tag_rex((inst)); \ - if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } while (0) -#endif - -typedef union { - guint64 val; - unsigned char b [8]; -} amd64_imm_buf; - -#include "../x86/x86-codegen.h" - -/* In 64 bit mode, all registers have a low byte subregister */ -#undef X86_IS_BYTE_REG -#define X86_IS_BYTE_REG(reg) 1 - -#define amd64_modrm_mod(modrm) ((modrm) >> 6) -#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) -#define amd64_modrm_rm(modrm) ((modrm) & 0x7) - -#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) -#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) -#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) - -#define amd64_sib_scale(sib) ((sib) >> 6) -#define amd64_sib_index(sib) (((sib) >> 3) & 0x7) -#define amd64_sib_base(sib) ((sib) & 0x7) - -#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) - -#define x86_imm_emit64(inst,imm) \ - do { \ - amd64_imm_buf imb; \ - imb.val = (guint64) (imm); \ - *(inst)++ = imb.b [0]; \ - *(inst)++ = imb.b [1]; \ - *(inst)++ = imb.b [2]; \ - *(inst)++ = imb.b [3]; \ - *(inst)++ = imb.b [4]; \ - *(inst)++ = imb.b [5]; \ - *(inst)++ = imb.b [6]; \ - *(inst)++ = imb.b [7]; \ - } while (0) - -#define amd64_membase_emit(inst,reg,basereg,disp) do { \ - if ((basereg) == AMD64_RIP) { \ - x86_address_byte ((inst), 0, (reg)&0x7, 5); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - else \ - x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ -} while (0) - -#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ - do { \ - if (x86_is_imm8((imm))) { \ - amd64_emit_rex(inst, size, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x83; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((reg) == AMD64_RAX) { \ - amd64_emit_rex(inst, size, 0, 0, 0); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ - x86_imm_emit32 ((inst), (imm)); \ - } else { \ - 
amd64_emit_rex(inst, size, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x81; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ - do { \ - amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) - -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) - -#elif defined(__native_client_codegen__) -/* NaCl modules may not directly update RSP or RBP other than direct copies */ -/* between them. Instead the lower 4 bytes are updated and then added to R15 */ -#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP)) - -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - do{ \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg(reg)) { \ - if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \ - g_assert_not_reached(); \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ - /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while(0) - -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \ - if (((opc) != X86_ADD && (opc) != X86_SUB)) \ - g_assert_not_reached(); \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ - /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) - -#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) - -#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - amd64_membase_emit (inst, reg, basereg, disp); \ - amd64_codegen_post(inst); \ -} while (0) - -#define amd64_mov_regp_reg(inst,regp,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_regp_emit ((inst), (reg), (regp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), 
(disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_mem_reg(inst,mem,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_reg_reg(inst,dreg,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_reg_mem_body(inst,reg,mem,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#if defined(__default_codegen__) -#define amd64_mov_reg_mem(inst,reg,mem,size) \ - do { \ - amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ - } while (0) -#elif defined(__native_client_codegen__) -/* We have to re-base memory reads because memory isn't zero based. */ -#define amd64_mov_reg_mem(inst,reg,mem,size) \ - do { \ - amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \ - } while (0) -#endif /* __native_client_codegen__ */ - -#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ - do { \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ - do { \ - amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ - x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ - } while (0) - -#elif defined(__native_client_codegen__) - -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ - /* Add %r15 using LEA to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while(0) - -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ - /* Add %r15 */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ - case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ - case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_mem(inst,reg,mem) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,0); \ - *(inst)++ = (unsigned char)0x63; \ - x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,(basereg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_reg(inst,dreg,reg) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(dreg),0,(reg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ - } while (0) - -/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of - * 32-bit immediate. Pepper with casts to avoid warnings. 
- */ -#define amd64_mov_reg_imm_size(inst,reg,imm,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, (size), 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ - if ((size) == 8) \ - x86_imm_emit64 ((inst), (guint64)(imm)); \ - else \ - x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_reg_imm(inst,reg,imm) \ - do { \ - int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \ - amd64_codegen_pre(inst); \ - amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) - -#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg)) - -#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \ - if ((size) == 1) { \ - *(inst)++ = (unsigned char)0xc6; \ - x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((size) == 2) { \ - *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ - x86_imm_emit16 ((inst), (imm)); \ - } else { \ - *(inst)++ = (unsigned char)0xc7; \ - x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - - -#define amd64_lea_membase_body(inst,reg,basereg,disp) \ - do { \ - amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x8d; \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#if defined(__default_codegen__) -#define amd64_lea_membase(inst,reg,basereg,disp) \ - amd64_lea_membase_body((inst), (reg), (basereg), (disp)) -#elif defined(__native_client_codegen__) -/* NaCl modules may not write directly into RSP/RBP. Instead, use a */ -/* 32-bit LEA and add R15 to the effective address */ -#define amd64_lea_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg(reg)) { \ - /* 32-bit LEA */ \ - amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x8d; \ - amd64_membase_emit((inst), (reg), (basereg), (disp)); \ - /* Use a 64-bit LEA instead of an ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) -#endif /*__native_client_codegen__*/ - -/* Instruction are implicitly 64-bits so don't generate REX for just the size. */ -#define amd64_push_reg(inst,reg) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -/* Instruction is implicitly 64-bits so don't generate REX for just the size. 
*/ -#define amd64_push_membase(inst,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_pop_reg_body(inst,reg) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_call_reg(inst,reg) \ - do { \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ - } while (0) - - -#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) -#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) - -#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg)) - -#elif defined(__native_client_codegen__) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex ((inst),0,0,0,(reg)); \ - x86_jump_reg((inst),((reg)&0x7)); \ - amd64_codegen_post((inst)); \ - } while (0) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_mem_size(inst,mem,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \ - amd64_jump_reg_size((inst), AMD64_R11, 4); \ - amd64_codegen_post((inst)); \ - } while (0) - -#define amd64_call_reg_internal(inst,reg) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex((inst), 0, 0, 0, (reg)); \ - x86_call_reg((inst), ((reg) & 0x7)); \ - amd64_codegen_post((inst)); \ - } while (0) - -#define amd64_call_reg(inst,reg) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre(inst); \ - amd64_call_reg_internal((inst), (reg)); \ - amd64_call_sequence_post(inst); \ - amd64_codegen_post((inst)); \ - } while (0) - - -#define amd64_ret(inst) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_jump_reg_size((inst), AMD64_R11, 8); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_leave(inst) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_pop_reg(inst,reg) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - } else { \ - amd64_pop_reg_body((inst), (reg)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_movsd_reg_regp(inst,reg,regp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit 
((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_regp_reg(inst,regp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_reg_regp(inst,reg,regp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_regp_reg(inst,regp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_membase_reg(inst,basereg,disp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -/* The original inc_reg opcode is used as the REX prefix */ -#define amd64_inc_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),0,0,(reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst),0,(reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_dec_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),0,0,(reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst),1,(reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),0,0,0,(basereg)); \ - *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ - amd64_membase_emit ((inst), 0, (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#if defined (__default_codegen__) - -/* From the AMD64 Software Optimization Manual */ -#define amd64_padding_size(inst,size) \ - do { \ - switch ((size)) { \ - case 1: *(inst)++ = 0x90; break; \ - case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ - }; \ - } while (0) - -#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) -#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) - -#define amd64_jump_code_size(inst,target,size) do { \ - if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ - x86_jump_code((inst),(target)); \ - } else { \ - amd64_jump_membase ((inst), AMD64_RIP, 0); \ - *(guint64*)(inst) = (guint64)(target); \ - (inst) += 8; \ - } \ -} while (0) - -#elif defined(__native_client_codegen__) - -/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */ -/* 64-bit Native Client because they load into rSP/rBP or use duplicate */ -/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */ -/* Intel64 and IA-32 Architectures Optimization Reference Manual and */ -/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */ - -#define amd64_padding_size(inst,size) \ - do { \ - unsigned char *code_start = (inst); \ - switch ((size)) { \ - /* xchg %eax,%eax, recognized by hardware as a NOP */ \ - case 1: *(inst)++ = 0x90; break; \ - /* xchg %ax,%ax */ \ - case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \ - break; \ - /* nop (%rax) */ \ - case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - *(inst)++ = 0x00; \ - break; \ - /* nop 0x0(%rax) */ \ - case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nop 0x0(%rax,%rax) */ \ - case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nopw 0x0(%rax,%rax) */ \ - case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \ - *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nop 0x0(%rax) (32-bit displacement) */ \ - case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 2, 0, AMD64_RAX); \ - x86_imm_emit32((inst), 0); \ - break; \ - /* nop 0x0(%rax,%rax) (32-bit displacement) */ \ - case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 2, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit32 ((inst), 0); \ - break; \ - default: \ - g_assert_not_reached(); \ - } \ - g_assert(code_start + (size) == (unsigned char *)(inst)); \ - } while (0) - - -/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ -#define amd64_call_membase_size(inst,basereg,disp,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre(inst); \ - amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ - 
amd64_call_reg_internal((inst), AMD64_R11); \ - amd64_call_sequence_post(inst); \ - amd64_codegen_post((inst)); \ - } while (0) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_membase_size(inst,basereg,disp,size) \ - do { \ - amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ - amd64_jump_reg_size((inst), AMD64_R11, 4); \ - } while (0) - -/* On Native Client we can't jump more than INT_MAX in either direction */ -#define amd64_jump_code_size(inst,target,size) \ - do { \ - /* x86_jump_code used twice in case of */ \ - /* relocation by amd64_codegen_post */ \ - guint8* jump_start; \ - amd64_codegen_pre(inst); \ - assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \ - x86_jump_code((inst),(target)); \ - inst = amd64_codegen_post(inst); \ - jump_start = (inst); \ - x86_jump_code((inst),(target)); \ - mono_amd64_patch(jump_start, (target)); \ -} while (0) - -#endif /*__native_client_codegen__*/ - -/* - * SSE - */ - -//TODO Reorganize SSE opcode defines. - -/* Two opcode SSE defines */ - -#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) - -#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ - amd64_codegen_pre(inst); \ - emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ - x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -/* Three opcode SSE defines */ - -#define emit_opcode3(inst,op1,op2,op3) do { \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ -} while (0) - -#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ - amd64_codegen_pre(inst); \ - *(inst)++ = (unsigned char)(op1); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) - -#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ - amd64_codegen_pre(inst); \ - emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ - x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -/* Four opcode SSE defines */ - -#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - *(inst)++ = (unsigned char)(op4); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) - -/* specific SSE opcode defines */ - -#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) - -#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) - -#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) - -#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) - -#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) - -#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) - -#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) - -#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), 
(disp), 0xf3, 0x0f, 0x10) - -#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) - -#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) - -#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) - -#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) - -#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) - -#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) - -#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) - -#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) - -#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) - -#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) - -#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) - -#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) - -#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) - -#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51) - - -#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm)) - -#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm)) - - -#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) - - -#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) - -#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e) - -#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) - -#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) - -#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) - -#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) - -#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) - -#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) - -#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) - -#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) - -#define amd64_sse_xorps_reg_reg(inst,dreg,reg) 
emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) - -#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) - -#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) - -#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) - -#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) - -#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) - -#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) - -#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) - -#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) - - -#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) - -#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) - -#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) - -#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) - -#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) - - -#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) - -#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) - -#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) - -#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) - -#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) - -#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) - -#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) - -#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) - -#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) - -#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) - -#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) - -#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) - -#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53) - -#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0) - -#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c) - -#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d) - -#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12) - - -#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7) - - 
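/* Editor's sketch, not part of the original header: a worked example of
 * the shuffle and packed macros nearby. The pshufd imm8 packs four 2-bit
 * source-lane selectors, low lane first, so 0x1b (lanes 3,2,1,0) reverses
 * the dwords; xorpd of a register with itself is the usual way to zero a
 * register without loading a constant. */
static unsigned char *
sse_demo (unsigned char *p)
{
	amd64_sse_xorpd_reg_reg (p, AMD64_XMM0, AMD64_XMM0);            /* xmm0 = 0 */
	amd64_sse_pshufd_reg_reg_imm (p, AMD64_XMM1, AMD64_XMM2, 0x1b); /* reverse xmm2's dwords into xmm1 */
	return p;
}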
-#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb) - -#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb) - -#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef) - - -#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc) - -#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd) - -#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe) - -#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4) - - -#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) - -#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) - -#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) - -#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) - - -#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) - -#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) - -#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) - - -#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) - -#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) - -#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) - - -#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) - -#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) - - -#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) - -#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) - -#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b) - - -#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38) - -#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) - -#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) - - -#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) - -#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) - -#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) - -#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) - - -#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) - -#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) - -#define 
amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66) - -#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37) - - -#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6) - - -#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60) - -#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61) - -#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62) - -#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c) - -#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14) - -#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14) - - -#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68) - -#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69) - -#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a) - -#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d) - -#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15) - -#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15) - - -#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63) - -#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b) - -#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67) - -#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b) - - -#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc) - -#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8) - -#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd) - -#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9) - - -#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec) - -#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8) - -#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed) - -#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9) - - -#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5) - -#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40) - -#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4) - -#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4) - -#define
-#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
-
-#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
-
-
-#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
-
-#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
-
-
-#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
-
-#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
-
-
-#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
-
-#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
-
-
-#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
-
-#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
-
-
-#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
-
-#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
-
-
-#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
-
-#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
-
-
-#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
-
-#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
-
-
-#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
-
-#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
-
-
-#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
-
-#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
-
-#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
-
-#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
-
-#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
-
-#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
-
-#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
-
-#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
-
-
-#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
-
-#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
-
-#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
-
-
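[Editorial note: the movd wrappers take an explicit size; through emit_sse_reg_reg_size, size 8 adds REX.W and turns the 66 0F 6E/7E encodings into their 64-bit MOVQ forms. A hedged sketch, reusing the hypothetical code cursor from the earlier example:

    /* Move RCX into XMM0 as a full 64-bit value (66 + REX.W + 0F 6E /r);
     * passing size 4 instead would emit a plain 32-bit MOVD. */
    amd64_movd_xreg_reg_size (code, AMD64_XMM0, AMD64_RCX, 8);
]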
-#define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
-
-#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
-
-
-#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
-
-#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
-
-#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
-
-#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
-
-#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
-
-#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
-
-#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
-
-/* Generated from x86-codegen.h */
-
-#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
-#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0)
-#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
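[Editorial note: the `(size) == 8 ? 4 : (size)` in the three xchg wrappers is deliberate: amd64_emit_rex has already emitted REX.W for 8-byte operands, so the x86-level macro is asked for its 4-byte encoding and the prefix upgrades it to 64 bits. A sketch with the hypothetical code cursor from earlier:

    /* XCHG between R8 and RAX: a REX byte (W plus R for the extended
     * register) followed by the plain 32-bit 87 /r opcode. */
    amd64_xchg_reg_reg_size (code, AMD64_R8, AMD64_RAX, 8);
]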
-#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0)
-//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
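[Editorial note: all of these wrappers mask their register arguments with 0x7 before calling into the x86 emitter; the high bit of an extended register has already been folded into REX.R/REX.X/REX.B by amd64_emit_rex. A hedged sketch of what that means in practice:

    /* 64-bit compare of the qword at [R12+0x10] against 1: the REX byte
     * carries W (size 8) and B (extended base register R12), and the
     * x86 layer then only sees base register 4 (R12 & 0x7). */
    amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R12, 0x10, 1, 8);
]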
-#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
-#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
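[Editorial note: the mov wrappers use the same width trick as xchg above; for size 8 the x86 macro is called with 4 and REX.W supplies the extra width. A hedged sketch with the hypothetical code cursor:

    /* RDI = R15: prefix 0x49 (REX.W | REX.B for the extended source),
     * then the ordinary 32-bit 8B /r move opcode. */
    amd64_mov_reg_reg_size (code, AMD64_RDI, AMD64_R15, 8);
]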
-#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
-#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
-//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
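[Editorial note: most of the x87 wrappers above pass width 0 to amd64_emit_rex, since the FPU opcodes have no REX.W form; the rex call still matters when the addressing uses an extended base register, in which case only REX.B is emitted. A hedged sketch:

    /* Store st(0) to [R13 - 8] as a double and pop: REX.B for the R13
     * base, then the usual FSTP m64fp encoding (DD /3). */
    amd64_fst_membase_size (code, AMD64_R13, -8, 1, 1, 8);
]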
-//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
-//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#if !defined( __native_client_codegen__ )
-/* Defined above for Native Client, so they can be used in other macros */
-#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
-#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
-#endif
-#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
-#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
-#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0)
-#if defined(__default_codegen__)
-#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
-#elif defined(__native_client_codegen__)
-#define amd64_branch_size(inst,cond,target,is_signed,size) \
-	do { \
-		/* amd64_branch_size_body used twice in */ \
-		/* case of relocation by amd64_codegen_post */ \
-		guint8* branch_start; \
-		amd64_codegen_pre(inst); \
-		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
-		inst = amd64_codegen_post(inst); \
-		branch_start = inst; \
-		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
-		mono_amd64_patch(branch_start, (target)); \
-	} while (0)
-#endif
-
-#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-
-#if defined(__default_codegen__)
-
-#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
-#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
-
-#elif defined(__native_client_codegen__)
-/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
-#define amd64_call_imm_size(inst,disp,size) \
-	do { \
-		amd64_codegen_pre((inst)); \
-		amd64_call_sequence_pre((inst)); \
-		x86_call_imm((inst),(disp)); \
-		amd64_call_sequence_post((inst)); \
-		amd64_codegen_post((inst)); \
-	} while (0)
-
-/* x86_call_code is called twice below, first so we can get the size of the */
-/* call sequence, and again so the exact offset from "inst" is used, since */
-/* the sequence could have moved from amd64_call_sequence_post. */
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_call_code_size(inst,target,size) \
-	do { \
-		amd64_codegen_pre((inst)); \
-		guint8* adjusted_start; \
-		guint8* call_start; \
-		amd64_call_sequence_pre((inst)); \
-		x86_call_code((inst),(target)); \
-		adjusted_start = amd64_call_sequence_post((inst)); \
-		call_start = adjusted_start; \
-		x86_call_code(adjusted_start, (target)); \
-		amd64_codegen_post((inst)); \
-		mono_amd64_patch(call_start, (target)); \
-	} while (0)
-
-#endif /*__native_client_codegen__*/
-
-//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),8,0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0)
-//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0)
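[Editorial note: the cmov wrappers mirror x86_cmov_reg, taking the condition code and a signedness flag that selects between the signed and unsigned condition encodings. A usage sketch with the hypothetical code cursor from the earlier examples:

    /* RAX = RDX if the last signed comparison was 'greater':
     * REX.W followed by 0F 4F /r (CMOVG r64, r/m64). */
    amd64_cmov_reg_size (code, X86_CC_GT, 1, AMD64_RAX, AMD64_RDX, 8);
]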
-//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0)
-#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0)
-#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0)
-
-
-
-
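[Editorial note: the alias block that follows re-exports each wrapper under its bare amd64_* name with the size pinned to 8, so code ported from the x86 emitter gets 64-bit operands by default; only the mov/xchg/xadd families keep an explicit size argument. For instance:

    /* Same as amd64_inc_membase_size (code, AMD64_RBP, 16, 8):
     * a 64-bit increment (REX.W + FF /0) of the qword at [RBP+16]. */
    amd64_inc_membase (code, AMD64_RBP, 16);
]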
-#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8)
-#define amd64_cld(inst) amd64_cld_size(inst,8)
-#define amd64_stosb(inst) amd64_stosb_size(inst,8)
-#define amd64_stosl(inst) amd64_stosl_size(inst,8)
-#define amd64_stosd(inst) amd64_stosd_size(inst,8)
-#define amd64_movsb(inst) amd64_movsb_size(inst,8)
-#define amd64_movsl(inst) amd64_movsl_size(inst,8)
-#define amd64_movsd(inst) amd64_movsd_size(inst,8)
-#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8)
-#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8)
-#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
-#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8)
-#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
-#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size)
-#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size)
-#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size)
-#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size)
-#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8)
-#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8)
-#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8)
-#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8)
-#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8)
-#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8)
-#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8)
-#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8)
-#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8)
-#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8)
-#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8)
-#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8)
-#define amd64_nop(inst) amd64_nop_size(inst,8)
-//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8)
-#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8)
-#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
-#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8)
-#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
-//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8)
-#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
-#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8)
-#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
-#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8)
-#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8)
-#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8)
-#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8)
-#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8)
-#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8)
-#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8)
-#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8)
-#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8)
-#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8)
-#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8)
-#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8)
-#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8)
-#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8)
-#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8)
-#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8)
-#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8)
-#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8)
-#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8)
-#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8)
-#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8)
-#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8)
-#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8)
-#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8)
-#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8)
-#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8)
-#define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8)
-#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8)
-//#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size)
-//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size)
-//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size)
-//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size)
-//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size)
-//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size)
-#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size)
-#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8)
-//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8)
-#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size)
-//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size)
-#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size)
-#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8)
-//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8)
-#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8)
-#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8)
-#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8)
-#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8)
-#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8)
-#define amd64_cdq(inst) amd64_cdq_size(inst,8)
-#define amd64_wait(inst) amd64_wait_size(inst,8)
-#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8)
-#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8)
-#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8)
-#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8)
-#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8)
-#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8)
-#define amd64_fcompp(inst) amd64_fcompp_size(inst,8)
-#define amd64_fucompp(inst) amd64_fucompp_size(inst,8)
-#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8)
-#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8)
-#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8)
-#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8)
-#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8)
-#define amd64_fchs(inst) amd64_fchs_size(inst,8)
-#define amd64_frem(inst) amd64_frem_size(inst,8)
-#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8)
-#define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8)
-#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8)
-#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8)
-#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8)
-#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8)
-#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8)
-#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8)
-#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8)
-#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8)
-#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8)
-#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8)
-#define amd64_fldz(inst) amd64_fldz_size(inst,8)
-#define amd64_fld1(inst) amd64_fld1_size(inst,8)
-#define amd64_fldpi(inst) amd64_fldpi_size(inst,8)
-#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8)
-#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8)
-#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8)
-#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8)
-#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8)
-#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8)
-#define amd64_fstsw(inst) amd64_fstsw_size(inst,8)
-#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8)
-//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8)
-#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8)
-#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8)
-//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8)
-#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8)
-#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8)
-//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8)
-#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8)
-#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8)
-#define amd64_pushad(inst) amd64_pushad_size(inst,8)
-#define amd64_pushfd(inst) amd64_pushfd_size(inst,8)
-#define amd64_popad(inst) amd64_popad_size(inst,8)
-#define amd64_popfd(inst) amd64_popfd_size(inst,8)
-#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8)
-#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8)
-#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8)
-#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8)
-#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8)
-#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8)
-#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8)
-#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8)
-#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8)
-#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8)
-#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8)
-#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8)
-#define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8)
-#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8)
-#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8)
-#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8)
-#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
-#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8)
-//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8)
-#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8)
-#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8)
-#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8)
-//#define amd64_ret(inst) amd64_ret_size(inst,8)
-#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8)
-#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
-#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
-#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
-#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize)
-//#define amd64_leave(inst) amd64_leave_size(inst,8)
-#define amd64_sahf(inst) amd64_sahf_size(inst,8)
-#define amd64_fsin(inst) amd64_fsin_size(inst,8)
-#define amd64_fcos(inst) amd64_fcos_size(inst,8)
-#define amd64_fabs(inst) amd64_fabs_size(inst,8)
-#define amd64_ftst(inst) amd64_ftst_size(inst,8)
-#define amd64_fxam(inst) amd64_fxam_size(inst,8)
-#define amd64_fpatan(inst) amd64_fpatan_size(inst,8)
-#define amd64_fprem(inst) amd64_fprem_size(inst,8)
-#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
-#define amd64_frndint(inst) amd64_frndint_size(inst,8)
-#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
-#define amd64_fptan(inst) amd64_fptan_size(inst,8)
-#define amd64_padding(inst,size) amd64_padding_size(inst,size)
-#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
-#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)
-
-#endif // AMD64_H
diff --git a/x64/.gitignore b/x64/.gitignore
new file mode 100644
index 0000000..6930f61
--- /dev/null
+++ b/x64/.gitignore
@@ -0,0 +1,4 @@
+/Makefile.in
+/Makefile
+/.deps
+/.libs
diff --git a/x64/Makefile.am b/x64/Makefile.am
new file mode 100644
index 0000000..47daaaf
--- /dev/null
+++ b/x64/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = amd64-codegen.h
+
diff --git a/x64/amd64-codegen.h b/x64/amd64-codegen.h
new file mode 100644
index 0000000..3c40d9d
--- /dev/null
+++ b/x64/amd64-codegen.h
@@ -0,0 +1,1835 @@
+/*
+ * amd64-codegen.h: Macros for generating amd64 code
+ *
+ * Authors:
+ *   Paolo Molaro (lupus@ximian.com)
+ *   Intel Corporation (ORP Project)
+ *   Sergey Chaban (serge@wildwestsoftware.com)
+ *   Dietmar Maurer (dietmar@ximian.com)
+ *   Patrik Torstensson
+ *   Zalman Stern
+ *
+ * Copyright (C) 2000 Intel Corporation.  All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef AMD64_H
+#define AMD64_H
+
+#include <glib.h>
+
+typedef enum {
+	AMD64_RAX = 0,
+	AMD64_RCX = 1,
+	AMD64_RDX = 2,
+	AMD64_RBX = 3,
+	AMD64_RSP = 4,
+	AMD64_RBP = 5,
+	AMD64_RSI = 6,
+	AMD64_RDI = 7,
+	AMD64_R8 = 8,
+	AMD64_R9 = 9,
+	AMD64_R10 = 10,
+	AMD64_R11 = 11,
+	AMD64_R12 = 12,
+	AMD64_R13 = 13,
+	AMD64_R14 = 14,
+	AMD64_R15 = 15,
+	AMD64_RIP = 16,
+	AMD64_NREG
+} AMD64_Reg_No;
+
+typedef enum {
+	AMD64_XMM0 = 0,
+	AMD64_XMM1 = 1,
+	AMD64_XMM2 = 2,
+	AMD64_XMM3 = 3,
+	AMD64_XMM4 = 4,
+	AMD64_XMM5 = 5,
+	AMD64_XMM6 = 6,
+	AMD64_XMM7 = 7,
+	AMD64_XMM8 = 8,
+	AMD64_XMM9 = 9,
+	AMD64_XMM10 = 10,
+	AMD64_XMM11 = 11,
+	AMD64_XMM12 = 12,
+	AMD64_XMM13 = 13,
+	AMD64_XMM14 = 14,
+	AMD64_XMM15 = 15,
+	AMD64_XMM_NREG = 16,
+} AMD64_XMM_Reg_No;
+
+typedef enum
+{
+	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+	AMD64_REX_W = 8  /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} AMD64_REX_Bits;
+
+#if defined(__default_codegen__)
+
+#define amd64_codegen_pre(inst)
+#define amd64_codegen_post(inst)
+
+#elif defined(__native_client_codegen__)
+
+#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
+#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size.  */
+/* These pre- and post-sequence hooks remedy this by aligning the call  */
+/* sequence after we emit it, since we will know the exact size then.  */
+#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define amd64_call_sequence_post(inst) \
+	(mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers     */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have  */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define amd64_is_valid_nacl_base(reg) \
+	((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
+	 (reg) == AMD64_RBP || (reg) == AMD64_RSP)
+
+#endif /*__native_client_codegen__*/
+
+#ifdef TARGET_WIN32
+#define AMD64_ARG_REG1 AMD64_RCX
+#define AMD64_ARG_REG2 AMD64_RDX
+#define AMD64_ARG_REG3 AMD64_R8
+#define AMD64_ARG_REG4 AMD64_R9
+#else
+#define AMD64_ARG_REG1 AMD64_RDI
+#define AMD64_ARG_REG2 AMD64_RSI
+#define AMD64_ARG_REG3 AMD64_RDX
+#define AMD64_ARG_REG4 AMD64_RCX
+#endif
+
+#ifdef TARGET_WIN32
+#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
+#define AMD64_IS_CALLEE_REG(reg)  (AMD64_CALLEE_REGS & (1 << (reg)))
+
+#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
+#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
+
+#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
+#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
+#else
+#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
+#define AMD64_IS_CALLEE_REG(reg)  (AMD64_CALLEE_REGS & (1 << (reg)))
+
+#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
+#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
+
+#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
+#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
+#endif
+
+#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits)))
+
+#if defined(__default_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+	{ \
+		unsigned char _amd64_rex_bits = \
+			(((width) > 4) ? AMD64_REX_W : 0) | \
+			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
+			(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+		if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+	} while (0)
+#elif defined(__native_client_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+	{ \
+		unsigned char _amd64_rex_bits = \
+			(((width) > 4) ? AMD64_REX_W : 0) | \
+			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
+			(((reg_rm_base_opcode) > 7) ?
AMD64_REX_B : 0); \ + amd64_nacl_tag_rex((inst)); \ + if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + } while (0) +#endif + +typedef union { + guint64 val; + unsigned char b [8]; +} amd64_imm_buf; + +#include "../x86/x86-codegen.h" + +/* In 64 bit mode, all registers have a low byte subregister */ +#undef X86_IS_BYTE_REG +#define X86_IS_BYTE_REG(reg) 1 + +#define amd64_modrm_mod(modrm) ((modrm) >> 6) +#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) +#define amd64_modrm_rm(modrm) ((modrm) & 0x7) + +#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) +#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) +#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) + +#define amd64_sib_scale(sib) ((sib) >> 6) +#define amd64_sib_index(sib) (((sib) >> 3) & 0x7) +#define amd64_sib_base(sib) ((sib) & 0x7) + +#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) + +#define x86_imm_emit64(inst,imm) \ + do { \ + amd64_imm_buf imb; \ + imb.val = (guint64) (imm); \ + *(inst)++ = imb.b [0]; \ + *(inst)++ = imb.b [1]; \ + *(inst)++ = imb.b [2]; \ + *(inst)++ = imb.b [3]; \ + *(inst)++ = imb.b [4]; \ + *(inst)++ = imb.b [5]; \ + *(inst)++ = imb.b [6]; \ + *(inst)++ = imb.b [7]; \ + } while (0) + +#define amd64_membase_emit(inst,reg,basereg,disp) do { \ + if ((basereg) == AMD64_RIP) { \ + x86_address_byte ((inst), 0, (reg)&0x7, 5); \ + x86_imm_emit32 ((inst), (disp)); \ + } \ + else \ + x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ +} while (0) + +#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ + do { \ + if (x86_is_imm8((imm))) { \ + amd64_emit_rex(inst, size, 0, 0, (reg)); \ + *(inst)++ = (unsigned char)0x83; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit8 ((inst), (imm)); \ + } else if ((reg) == AMD64_RAX) { \ + amd64_emit_rex(inst, size, 0, 0, 0); \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ + x86_imm_emit32 ((inst), (imm)); \ + } else { \ + amd64_emit_rex(inst, size, 0, 0, (reg)); \ + *(inst)++ = (unsigned char)0x81; \ + x86_reg_emit ((inst), (opc), (reg)); \ + x86_imm_emit32 ((inst), (imm)); \ + } \ + } while (0) + +#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ + do { \ + amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + } while (0) + +#if defined(__default_codegen__) + +#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ + amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) + +#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ + amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) + +#elif defined(__native_client_codegen__) +/* NaCl modules may not directly update RSP or RBP other than direct copies */ +/* between them. 
Instead the lower 4 bytes are updated and then added to R15 */ +#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP)) + +#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ + do{ \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg(reg)) { \ + if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \ + g_assert_not_reached(); \ + amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ + /* Use LEA instead of ADD to preserve flags */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while(0) + +#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \ + if (((opc) != X86_ADD && (opc) != X86_SUB)) \ + g_assert_not_reached(); \ + amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ + /* Use LEA instead of ADD to preserve flags */ \ + amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while (0) + +#endif /*__native_client_codegen__*/ + +#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) + +#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) + +#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ + *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ + amd64_membase_emit (inst, reg, basereg, disp); \ + amd64_codegen_post(inst); \ +} while (0) + +#define amd64_mov_regp_reg(inst,regp,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_regp_emit ((inst), (reg), (regp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_mov_mem_reg(inst,mem,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (reg), 0, 0); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x88; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ + default: assert (0); \ + } \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_mov_reg_reg(inst,dreg,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ + switch ((size)) { \ + case 1: *(inst)++ = 
(unsigned char)0x8a; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_mov_reg_mem_body(inst,reg,mem,size) \ + do { \ + amd64_codegen_pre(inst); \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (reg), 0, 0); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_address_byte ((inst), 0, (reg), 4); \ + x86_address_byte ((inst), 0, 4, 5); \ + x86_imm_emit32 ((inst), (mem)); \ + amd64_codegen_post(inst); \ + } while (0) + +#if defined(__default_codegen__) +#define amd64_mov_reg_mem(inst,reg,mem,size) \ + do { \ + amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ + } while (0) +#elif defined(__native_client_codegen__) +/* We have to re-base memory reads because memory isn't zero based. */ +#define amd64_mov_reg_mem(inst,reg,mem,size) \ + do { \ + amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \ + } while (0) +#endif /* __native_client_codegen__ */ + +#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ + do { \ + if ((size) == 2) \ + x86_prefix((inst), X86_OPERAND_PREFIX); \ + amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x8a; break; \ + case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + } while (0) + +#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ + x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); \ + } while (0) + +#if defined(__default_codegen__) + +#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) +#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + } while (0) + +#elif defined(__native_client_codegen__) + +#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((reg))) { \ + /* Clear upper 32 bits with mov of size 4 */ \ + amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ + /* Add %r15 using LEA to preserve flags */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while(0) + +#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + amd64_codegen_pre(inst); \ + if (amd64_is_nacl_stack_reg((reg))) { \ + /* Clear upper 32 bits with mov of size 4 */ \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ + /* Add %r15 */ \ + amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + } else { \ + amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + } \ + amd64_codegen_post(inst); \ + } while (0) + +#endif /*__native_client_codegen__*/ + +#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + switch ((size)) { \ + case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ + case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ + case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ + default: assert (0); \ + } \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsxd_reg_mem(inst,reg,mem) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(reg),0,0); \ + *(inst)++ = (unsigned char)0x63; \ + x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(reg),0,(basereg)); \ + *(inst)++ = (unsigned char)0x63; \ + x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsxd_reg_reg(inst,dreg,reg) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex(inst,8,(dreg),0,(reg)); \ + *(inst)++ = (unsigned char)0x63; \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ + } while (0) + +/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of + * 32-bit immediate. Pepper with casts to avoid warnings. 
+ */
+#define amd64_mov_reg_imm_size(inst,reg,imm,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, (size), 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
+		if ((size) == 8) \
+			x86_imm_emit64 ((inst), (guint64)(imm)); \
+		else \
+			x86_imm_emit32 ((inst), (int)(guint64)(imm)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_mov_reg_imm(inst,reg,imm) \
+	do { \
+		int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \
+		amd64_codegen_pre(inst); \
+		amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8)
+
+#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg))
+
+#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
+		if ((size) == 1) { \
+			*(inst)++ = (unsigned char)0xc6; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit8 ((inst), (imm)); \
+		} else if ((size) == 2) { \
+			*(inst)++ = (unsigned char)0xc7; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit16 ((inst), (imm)); \
+		} else { \
+			*(inst)++ = (unsigned char)0xc7; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit32 ((inst), (imm)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+
+#define amd64_lea_membase_body(inst,reg,basereg,disp) \
+	do { \
+		amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x8d; \
+		amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+#if defined(__default_codegen__)
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+	amd64_lea_membase_body((inst), (reg), (basereg), (disp))
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
+/* 32-bit LEA and add R15 to the effective address */
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg(reg)) { \
+			/* 32-bit LEA */ \
+			amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \
+			*(inst)++ = (unsigned char)0x8d; \
+			amd64_membase_emit((inst), (reg), (basereg), (disp)); \
+			/* Use a 64-bit LEA instead of an ADD to preserve flags */ \
+			amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+#endif /*__native_client_codegen__*/
+
+/* Instruction is implicitly 64-bits so don't generate REX for just the size. */
+#define amd64_push_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
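/*
 * A minimal usage sketch of the immediate-width optimization and the
 * implicit-64-bit push above, assuming only the macros defined in this
 * header and a caller-provided buffer; the byte sequences in the comments
 * assume the standard amd64 encodings. amd64_mov_reg_imm emits the 5-byte
 * "mov r32, imm32" form (the CPU zero-extends the result) when the value
 * survives a round-trip through a signed 32-bit int, and the 10-byte REX.W
 * "mov r64, imm64" form otherwise; amd64_push_reg never sets REX.W, so only
 * r8-r15 cost an extra REX byte.
 */
#if 0	/* illustrative only */
unsigned char buf [64];
unsigned char *code = buf;

amd64_mov_reg_imm (code, AMD64_RAX, 1);                     /* b8 01 00 00 00 */
amd64_mov_reg_imm (code, AMD64_RAX, 0x123456789abcdef0ULL); /* 48 b8 f0 de bc 9a 78 56 34 12 */
amd64_push_reg (code, AMD64_RBX);                           /* 53 */
amd64_push_reg (code, AMD64_R12);                           /* 41 54 */
#endif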
+
+/* Instruction is implicitly 64-bits so don't generate REX for just the size. */
+#define amd64_push_membase(inst,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (basereg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_pop_reg_body(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#if defined(__default_codegen__)
+
+#define amd64_call_reg(inst,reg) \
+	do { \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
+	} while (0)
+
+
+#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg))
+
+#elif defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_reg_size(inst,reg,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex ((inst),0,0,0,(reg)); \
+		x86_jump_reg((inst),((reg)&0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_mem_size(inst,mem,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_mov_reg_mem((inst), AMD64_R11, (mem), 4); \
+		amd64_jump_reg_size((inst), AMD64_R11, 4); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg_internal(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex((inst), 0, 0, 0, (reg)); \
+		x86_call_reg((inst), ((reg) & 0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre(inst); \
+		amd64_call_reg_internal((inst), (reg)); \
+		amd64_call_sequence_post(inst); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+
+#define amd64_ret(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_jump_reg_size((inst), AMD64_R11, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_leave(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_pop_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((reg))) { \
+			amd64_pop_reg_body((inst), AMD64_R11); \
+			amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
+			amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		} else { \
+			amd64_pop_reg_body((inst), (reg)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
+#define amd64_movsd_reg_regp(inst,reg,regp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf2); \
+		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x10; \
+		x86_regp_emit
((inst), (reg) & 0x7, (regp) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsd_regp_reg(inst,regp,reg) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf2); \ + amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movss_reg_regp(inst,reg,regp) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf3); \ + amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movss_regp_reg(inst,regp,reg) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf3); \ + amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ + x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf2); \ + amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movss_reg_membase(inst,reg,basereg,disp) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf3); \ + amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x10; \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf2); \ + amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_movss_membase_reg(inst,basereg,disp,reg) \ + do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), 0xf3); \ + amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)0x0f; \ + *(inst)++ = (unsigned char)0x11; \ + x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ + amd64_codegen_post(inst); \ + } while (0) + +/* The original inc_reg opcode is used as the REX prefix */ +#define amd64_inc_reg_size(inst,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst),(size),0,0,(reg)); \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst),0,(reg) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_dec_reg_size(inst,reg,size) \ + do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst),(size),0,0,(reg)); \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst),1,(reg) & 0x7); \ + amd64_codegen_post(inst); \ + } while (0) + +#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst),0,0,0,(basereg)); \ + *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ + amd64_membase_emit ((inst), 0, (basereg), (disp)); \ + amd64_codegen_post(inst); \ +} while (0) + +#if defined (__default_codegen__) + +/* From the AMD64 Software Optimization Manual */ +#define amd64_padding_size(inst,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = 0x90; break; \ + case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ + }; \ + } while (0) + +#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) +#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) + +#define amd64_jump_code_size(inst,target,size) do { \ + if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ + x86_jump_code((inst),(target)); \ + } else { \ + amd64_jump_membase ((inst), AMD64_RIP, 0); \ + *(guint64*)(inst) = (guint64)(target); \ + (inst) += 8; \ + } \ +} while (0) + +#elif defined(__native_client_codegen__) + +/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */ +/* 64-bit Native Client because they load into rSP/rBP or use duplicate */ +/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */ +/* Intel64 and IA-32 Architectures Optimization Reference Manual and */ +/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */ + +#define amd64_padding_size(inst,size) \ + do { \ + unsigned char *code_start = (inst); \ + switch ((size)) { \ + /* xchg %eax,%eax, recognized by hardware as a NOP */ \ + case 1: *(inst)++ = 0x90; break; \ + /* xchg %ax,%ax */ \ + case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \ + break; \ + /* nop (%rax) */ \ + case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ + *(inst)++ = 0x00; \ + break; \ + /* nop 0x0(%rax) */ \ + case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ + x86_address_byte ((inst), 1, 0, AMD64_RAX); \ + x86_imm_emit8 ((inst), 0); \ + break; \ + /* nop 0x0(%rax,%rax) */ \ + case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ + x86_address_byte ((inst), 1, 0, 4); \ + x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ + x86_imm_emit8 ((inst), 0); \ + break; \ + /* nopw 0x0(%rax,%rax) */ \ + case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \ + *(inst)++ = 0x1f; \ + x86_address_byte ((inst), 1, 0, 4); \ + x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ + x86_imm_emit8 ((inst), 0); \ + break; \ + /* nop 0x0(%rax) (32-bit displacement) */ \ + case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ + x86_address_byte ((inst), 2, 0, AMD64_RAX); \ + x86_imm_emit32((inst), 0); \ + break; \ + /* nop 0x0(%rax,%rax) (32-bit displacement) */ \ + case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ + x86_address_byte ((inst), 2, 0, 4); \ + x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ + x86_imm_emit32 ((inst), 0); \ + break; \ + default: \ + g_assert_not_reached(); \ + } \ + g_assert(code_start + (size) == (unsigned char *)(inst)); \ + } while (0) + + +/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ +#define amd64_call_membase_size(inst,basereg,disp,size) \ + do { \ + amd64_codegen_pre((inst)); \ + amd64_call_sequence_pre(inst); \ + amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ + 
amd64_call_reg_internal((inst), AMD64_R11); \ + amd64_call_sequence_post(inst); \ + amd64_codegen_post((inst)); \ + } while (0) + +/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ +#define amd64_jump_membase_size(inst,basereg,disp,size) \ + do { \ + amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ + amd64_jump_reg_size((inst), AMD64_R11, 4); \ + } while (0) + +/* On Native Client we can't jump more than INT_MAX in either direction */ +#define amd64_jump_code_size(inst,target,size) \ + do { \ + /* x86_jump_code used twice in case of */ \ + /* relocation by amd64_codegen_post */ \ + guint8* jump_start; \ + amd64_codegen_pre(inst); \ + assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \ + x86_jump_code((inst),(target)); \ + inst = amd64_codegen_post(inst); \ + jump_start = (inst); \ + x86_jump_code((inst),(target)); \ + mono_amd64_patch(jump_start, (target)); \ +} while (0) + +#endif /*__native_client_codegen__*/ + +/* + * SSE + */ + +//TODO Reorganize SSE opcode defines. + +/* Two opcode SSE defines */ + +#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) + +#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ + amd64_codegen_pre(inst); \ + emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ + x86_imm_emit8 ((inst), (imm)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ + amd64_codegen_pre(inst); \ + amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ +} while (0) + +/* Three opcode SSE defines */ + +#define emit_opcode3(inst,op1,op2,op3) do { \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ +} while (0) + +#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ + amd64_codegen_pre(inst); \ + *(inst)++ = (unsigned char)(op1); \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) + +#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ + amd64_codegen_pre(inst); \ + emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ + x86_imm_emit8 ((inst), (imm)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ + amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ + amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ + amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ + amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ + amd64_codegen_post(inst); \ +} while (0) + +/* Four opcode SSE defines */ + +#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ + amd64_codegen_pre(inst); \ + x86_prefix((inst), (unsigned char)(op1)); \ + amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + *(inst)++ = (unsigned char)(op2); \ + *(inst)++ = (unsigned char)(op3); \ + *(inst)++ = (unsigned char)(op4); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + amd64_codegen_post(inst); \ +} while (0) + +#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) + +/* specific SSE opcode defines */ + +#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) + +#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) + +#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) + +#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) + +#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) + +#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) + +#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) + +#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), 
(disp), 0xf3, 0x0f, 0x10) + +#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) + +#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) + +#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) + +#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) + +#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) + +#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) + +#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) + +#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) + +#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) + +#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) + +#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) + +#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) + +#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) + +#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) + +#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) + +#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) + +#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51) + + +#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm)) + +#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm)) + + +#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) + + +#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) + +#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e) + +#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) + +#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) + +#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) + +#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) + +#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) + +#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) + +#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) + +#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) + +#define amd64_sse_xorps_reg_reg(inst,dreg,reg) 
emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) + +#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) + +#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) + +#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) + +#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) + +#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) + +#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) + +#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) + +#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) + + +#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) + +#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) + +#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) + +#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) + +#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) + + +#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) + +#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) + +#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) + +#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) + +#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) + +#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) + +#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) + +#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) + +#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) + +#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) + +#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) + +#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) + +#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53) + +#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0) + +#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c) + +#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d) + +#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12) + + +#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7) + + 
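/*
 * An illustrative sketch of how the generic emit_sse_* helpers above compose
 * an encoding, assuming a caller-provided buffer; the byte comments assume
 * the standard encodings. amd64_sse_addsd_reg_reg expands to
 * emit_sse_reg_reg(inst, dreg, reg, 0xf2, 0x0f, 0x58): the mandatory 0xf2
 * prefix is emitted first, then amd64_emit_rex adds a REX byte only when a
 * register index is 8-15 (REX must immediately precede the opcode), and
 * finally the 0x0f 0x58 opcode and the ModRM byte.
 */
#if 0	/* illustrative only */
unsigned char buf [32];
unsigned char *code = buf;

amd64_sse_addsd_reg_reg (code, AMD64_XMM0, AMD64_XMM1); /* f2 0f 58 c1 */
amd64_sse_addsd_reg_reg (code, AMD64_XMM8, AMD64_XMM1); /* f2 44 0f 58 c1 */
#endif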
+#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb) + +#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb) + +#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef) + + +#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc) + +#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd) + +#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe) + +#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4) + + +#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) + +#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) + +#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) + +#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) + + +#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) + +#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) + +#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) + + +#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) + +#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) + +#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) + + +#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) + +#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) + + +#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) + +#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) + +#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b) + + +#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38) + +#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) + +#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) + + +#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) + +#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) + +#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) + +#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) + + +#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) + +#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) + +#define 
amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+
+#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+
+
+#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+
+
+#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+
+#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+
+#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+
+#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+
+#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+
+#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+
+
+#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+
+#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+
+#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+
+#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+
+#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+
+#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+
+
+#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+
+#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+
+#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+
+#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+
+
+#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+
+#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+
+#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+
+#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
+
+
+#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+
+#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+
+#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+
+#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+
+
+#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+
+#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+
+#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+
+#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+
+#define 
amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5) + + +#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1) + + +#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1) + + +#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm)) + +#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1) + + +#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2) + + +#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2) + + +#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm)) + +#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2) + + +#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3) + + +#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) + + +#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm)) + +#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) + + +#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6) + +#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B) + +#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6) + +#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A) + +#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B) + +#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A) + +#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6) + +#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B) + + +#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) + +#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) + +#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) + + +#define 
amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) + +#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) + + +#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) + +#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) + +#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) + +#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) + +#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) + +#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) + +#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) + +/* Generated from x86-codegen.h */ + +#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) +#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0) +#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0) +#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) +#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) +//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex 
+#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0)
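+/* Illustrative sketch, not part of the original header: bit 3 of each
+ * register number travels in the REX prefix, while the wrapped x86
+ * macro only ever sees the low three bits (hence the &0x7 masks):
+ *
+ *   amd64_imul_reg_reg_imm_size (code, AMD64_R8, AMD64_R9, 10, 8);
+ *   // 4D 6B C1 0A   imul $10, %r9, %r8  (REX.WRB, then the 32-bit form)
+ */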
+#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) +#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) +//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) +#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0) +#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); 
x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) +#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0) +#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } 
while (0) +#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0) +//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) +#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0) +#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) +#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) +#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } 
while (0) +//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) +#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) +//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0) +#if !defined( __native_client_codegen__ ) +/* Defined above for Native Client, so they can be used in other macros */ +#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); 
x86_jump_reg((inst),((reg)&0x7)); } while (0) +#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) +#endif +#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) +#if defined(__default_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) +#elif defined(__native_client_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) \ + do { \ + /* amd64_branch_size_body used twice in */ \ + /* case of relocation by amd64_codegen_post */ \ + guint8* branch_start; \ + amd64_codegen_pre(inst); \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + inst = amd64_codegen_post(inst); \ + branch_start = inst; \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + mono_amd64_patch(branch_start, (target)); \ + } while (0) +#endif + +#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) + +#if defined(__default_codegen__) + +#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) +#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) + +#elif defined(__native_client_codegen__) +/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ +#define amd64_call_imm_size(inst,disp,size) \ + do { \ + amd64_codegen_pre((inst)); \ + amd64_call_sequence_pre((inst)); \ + x86_call_imm((inst),(disp)); \ + amd64_call_sequence_post((inst)); \ + amd64_codegen_post((inst)); \ + } while (0) + +/* x86_call_code is called twice below, first so we can get the size of the */ +/* call sequence, and again so the exact offset from "inst" is used, since */ +/* the sequence could have moved from 
amd64_call_sequence_post. */
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_call_code_size(inst,target,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		guint8* adjusted_start; \
+		guint8* call_start; \
+		amd64_call_sequence_pre((inst)); \
+		x86_call_code((inst),(target)); \
+		adjusted_start = amd64_call_sequence_post((inst)); \
+		call_start = adjusted_start; \
+		x86_call_code(adjusted_start, (target)); \
+		amd64_codegen_post((inst)); \
+		mono_amd64_patch(call_start, (target)); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
+//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0)
+//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0)
+//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0)
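+/* Illustrative sketch, hypothetical usage rather than anything from the
+ * original source: a branchless signed max of %rax and %rbx built from
+ * the emitters above:
+ *
+ *   amd64_alu_reg_reg_size (code, X86_CMP, AMD64_RAX, AMD64_RBX, 8);
+ *   // 48 3B C3      cmpq %rbx, %rax
+ *   amd64_cmov_reg_size (code, X86_CC_LT, 1, AMD64_RAX, AMD64_RBX, 8);
+ *   // 48 0F 4C C3   cmovl %rbx, %rax
+ */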
+#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0)
+#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0)
+
+
+
+
+#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8)
+#define amd64_cld(inst) amd64_cld_size(inst,8)
+#define amd64_stosb(inst) amd64_stosb_size(inst,8)
+#define amd64_stosl(inst) amd64_stosl_size(inst,8)
+#define amd64_stosd(inst) amd64_stosd_size(inst,8)
+#define amd64_movsb(inst) amd64_movsb_size(inst,8)
+#define amd64_movsl(inst) amd64_movsl_size(inst,8)
+#define amd64_movsd(inst) amd64_movsd_size(inst,8)
+#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8)
+#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8)
+#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
+#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8)
+#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
+#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size)
+#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size)
+#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
+#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size)
+#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size)
+#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
+#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8)
+#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8)
+#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8)
+#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8)
+#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8)
+#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8)
+#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8)
+#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8)
+#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8)
+#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8)
+#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8)
+#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8)
+#define amd64_nop(inst) amd64_nop_size(inst,8)
+//#define 
amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8) +#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) +//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8) +#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) +#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8) +#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) +#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) +#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8) +#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8) +#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8) +#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8) +#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8) +#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) +#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) +#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) +#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) +#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) +#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) +#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) +#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) +#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) +#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) +#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) +#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) +#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) +#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) +#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) +#define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) +#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) +//#define amd64_mov_mem_reg(inst,mem,reg,size) 
amd64_mov_mem_reg_size(inst,mem,reg,size) +//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) +//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) +//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) +//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) +//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) +#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) +#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) +//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) +#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) +//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) +#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) +#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) +//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) +#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) +#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) +#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) +#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) +#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) +#define amd64_cdq(inst) amd64_cdq_size(inst,8) +#define amd64_wait(inst) amd64_wait_size(inst,8) +#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) +#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) +#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) +#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) +#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) +#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) +#define amd64_fcompp(inst) amd64_fcompp_size(inst,8) +#define amd64_fucompp(inst) amd64_fucompp_size(inst,8) +#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) +#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) +#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) +#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) +#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8) +#define amd64_fchs(inst) amd64_fchs_size(inst,8) +#define amd64_frem(inst) amd64_frem_size(inst,8) +#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) +#define amd64_fcomi(inst,index) 
amd64_fcomi_size(inst,index,8) +#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) +#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) +#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) +#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) +#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) +#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) +#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) +#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) +#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) +#define amd64_fldz(inst) amd64_fldz_size(inst,8) +#define amd64_fld1(inst) amd64_fld1_size(inst,8) +#define amd64_fldpi(inst) amd64_fldpi_size(inst,8) +#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) +#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) +#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) +#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) +#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) +#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fstsw(inst) amd64_fstsw_size(inst,8) +#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8) +//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) +#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) +#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) +//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) +#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) +#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) +//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) +#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) +#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) +#define amd64_pushad(inst) amd64_pushad_size(inst,8) +#define amd64_pushfd(inst) amd64_pushfd_size(inst,8) +#define amd64_popad(inst) amd64_popad_size(inst,8) +#define amd64_popfd(inst) amd64_popfd_size(inst,8) +#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) +#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) +#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) +#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) +#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) +#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) +#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) +#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) +#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) +#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) +#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8) +#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8) +#define amd64_branch(inst,cond,target,is_signed) 
amd64_branch_size(inst,cond,target,is_signed,8) +#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8) +#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8) +#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8) +#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8) +#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8) +//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8) +#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8) +#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8) +#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8) +//#define amd64_ret(inst) amd64_ret_size(inst,8) +#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8) +#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8) +#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8) +#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8) +#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize) +//#define amd64_leave(inst) amd64_leave_size(inst,8) +#define amd64_sahf(inst) amd64_sahf_size(inst,8) +#define amd64_fsin(inst) amd64_fsin_size(inst,8) +#define amd64_fcos(inst) amd64_fcos_size(inst,8) +#define amd64_fabs(inst) amd64_fabs_size(inst,8) +#define amd64_ftst(inst) amd64_ftst_size(inst,8) +#define amd64_fxam(inst) amd64_fxam_size(inst,8) +#define amd64_fpatan(inst) amd64_fpatan_size(inst,8) +#define amd64_fprem(inst) amd64_fprem_size(inst,8) +#define amd64_fprem1(inst) amd64_fprem1_size(inst,8) +#define amd64_frndint(inst) amd64_frndint_size(inst,8) +#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8) +#define amd64_fptan(inst) amd64_fptan_size(inst,8) +#define amd64_padding(inst,size) amd64_padding_size(inst,size) +#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8) +#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8) + +#endif // AMD64_H -- cgit v1.1 From 2f9d2dd0d14a18d0f33800f01c1a1e76d1a228ff Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 12:16:11 +0200 Subject: Rename amd64-codegen.h to x64-codegen.h --- x64/Makefile.am | 2 +- x64/amd64-codegen.h | 1835 --------------------------------------------------- x64/x64-codegen.h | 1835 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1836 insertions(+), 1836 deletions(-) delete mode 100644 x64/amd64-codegen.h create mode 100644 x64/x64-codegen.h diff --git a/x64/Makefile.am b/x64/Makefile.am index 47daaaf..db9d583 100644 --- a/x64/Makefile.am +++ b/x64/Makefile.am @@ -1,2 +1,2 @@ -EXTRA_DIST = amd64-codegen.h +EXTRA_DIST = x64-codegen.h diff --git a/x64/amd64-codegen.h b/x64/amd64-codegen.h deleted file mode 100644 index 3c40d9d..0000000 --- a/x64/amd64-codegen.h +++ /dev/null @@ -1,1835 +0,0 @@ -/* - * amd64-codegen.h: Macros for generating amd64 code - * - * Authors: - * Paolo Molaro (lupus@ximian.com) - * Intel Corporation (ORP Project) - * Sergey Chaban (serge@wildwestsoftware.com) - * Dietmar Maurer (dietmar@ximian.com) - * Patrik Torstensson - * Zalman Stern - * - * Copyright (C) 2000 Intel Corporation. All rights reserved. - * Copyright (C) 2001, 2002 Ximian, Inc. 
- */
-
-#ifndef AMD64_H
-#define AMD64_H
-
-#include <glib.h>
-
-typedef enum {
-	AMD64_RAX = 0,
-	AMD64_RCX = 1,
-	AMD64_RDX = 2,
-	AMD64_RBX = 3,
-	AMD64_RSP = 4,
-	AMD64_RBP = 5,
-	AMD64_RSI = 6,
-	AMD64_RDI = 7,
-	AMD64_R8 = 8,
-	AMD64_R9 = 9,
-	AMD64_R10 = 10,
-	AMD64_R11 = 11,
-	AMD64_R12 = 12,
-	AMD64_R13 = 13,
-	AMD64_R14 = 14,
-	AMD64_R15 = 15,
-	AMD64_RIP = 16,
-	AMD64_NREG
-} AMD64_Reg_No;
-
-typedef enum {
-	AMD64_XMM0 = 0,
-	AMD64_XMM1 = 1,
-	AMD64_XMM2 = 2,
-	AMD64_XMM3 = 3,
-	AMD64_XMM4 = 4,
-	AMD64_XMM5 = 5,
-	AMD64_XMM6 = 6,
-	AMD64_XMM7 = 7,
-	AMD64_XMM8 = 8,
-	AMD64_XMM9 = 9,
-	AMD64_XMM10 = 10,
-	AMD64_XMM11 = 11,
-	AMD64_XMM12 = 12,
-	AMD64_XMM13 = 13,
-	AMD64_XMM14 = 14,
-	AMD64_XMM15 = 15,
-	AMD64_XMM_NREG = 16,
-} AMD64_XMM_Reg_No;
-
-typedef enum
-{
-	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
-	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
-	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
-	AMD64_REX_W = 8  /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
-} AMD64_REX_Bits;
-
-#if defined(__default_codegen__)
-
-#define amd64_codegen_pre(inst)
-#define amd64_codegen_post(inst)
-
-#elif defined(__native_client_codegen__)
-
-#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
-#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
-
-/* Because of rex prefixes, etc, call sequences are not constant size. */
-/* These pre- and post-sequence hooks remedy this by aligning the call */
-/* sequence after we emit it, since we will know the exact size then. */
-#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
-#define amd64_call_sequence_post(inst) \
-	(mono_nacl_align_call(&_code_start, &(inst)), _code_start);
-
-/* Native client can load/store using one of the following registers */
-/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
-/* its upper 32 bits cleared and reference memory using r15 as the base. */
-#define amd64_is_valid_nacl_base(reg) \
-	((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
-	(reg) == AMD64_RBP || (reg) == AMD64_RSP)
-
-#endif /*__native_client_codegen__*/
-
-#ifdef TARGET_WIN32
-#define AMD64_ARG_REG1 AMD64_RCX
-#define AMD64_ARG_REG2 AMD64_RDX
-#define AMD64_ARG_REG3 AMD64_R8
-#define AMD64_ARG_REG4 AMD64_R9
-#else
-#define AMD64_ARG_REG1 AMD64_RDI
-#define AMD64_ARG_REG2 AMD64_RSI
-#define AMD64_ARG_REG3 AMD64_RDX
-#define AMD64_ARG_REG4 AMD64_RCX
-#endif
-
-#ifdef TARGET_WIN32
-#define AMD64_CALLEE_REGS ((1< 4) ? AMD64_REX_W : 0) | \
-	(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
-	(((reg_index) > 7) ? AMD64_REX_X : 0) | \
-	(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
-	if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
-	} while (0)
-#elif defined(__native_client_codegen__)
-#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
-	{ \
-		unsigned char _amd64_rex_bits = \
-			(((width) > 4) ? AMD64_REX_W : 0) | \
-			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
-			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
-			(((reg_rm_base_opcode) > 7) ? 
AMD64_REX_B : 0); \ - amd64_nacl_tag_rex((inst)); \ - if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ - } while (0) -#endif - -typedef union { - guint64 val; - unsigned char b [8]; -} amd64_imm_buf; - -#include "../x86/x86-codegen.h" - -/* In 64 bit mode, all registers have a low byte subregister */ -#undef X86_IS_BYTE_REG -#define X86_IS_BYTE_REG(reg) 1 - -#define amd64_modrm_mod(modrm) ((modrm) >> 6) -#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) -#define amd64_modrm_rm(modrm) ((modrm) & 0x7) - -#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) -#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) -#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) - -#define amd64_sib_scale(sib) ((sib) >> 6) -#define amd64_sib_index(sib) (((sib) >> 3) & 0x7) -#define amd64_sib_base(sib) ((sib) & 0x7) - -#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) - -#define x86_imm_emit64(inst,imm) \ - do { \ - amd64_imm_buf imb; \ - imb.val = (guint64) (imm); \ - *(inst)++ = imb.b [0]; \ - *(inst)++ = imb.b [1]; \ - *(inst)++ = imb.b [2]; \ - *(inst)++ = imb.b [3]; \ - *(inst)++ = imb.b [4]; \ - *(inst)++ = imb.b [5]; \ - *(inst)++ = imb.b [6]; \ - *(inst)++ = imb.b [7]; \ - } while (0) - -#define amd64_membase_emit(inst,reg,basereg,disp) do { \ - if ((basereg) == AMD64_RIP) { \ - x86_address_byte ((inst), 0, (reg)&0x7, 5); \ - x86_imm_emit32 ((inst), (disp)); \ - } \ - else \ - x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ -} while (0) - -#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ - do { \ - if (x86_is_imm8((imm))) { \ - amd64_emit_rex(inst, size, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x83; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit8 ((inst), (imm)); \ - } else if ((reg) == AMD64_RAX) { \ - amd64_emit_rex(inst, size, 0, 0, 0); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ - x86_imm_emit32 ((inst), (imm)); \ - } else { \ - amd64_emit_rex(inst, size, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x81; \ - x86_reg_emit ((inst), (opc), (reg)); \ - x86_imm_emit32 ((inst), (imm)); \ - } \ - } while (0) - -#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ - do { \ - amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) - -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) - -#elif defined(__native_client_codegen__) -/* NaCl modules may not directly update RSP or RBP other than direct copies */ -/* between them. 
Instead the lower 4 bytes are updated and then added to R15 */ -#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP)) - -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - do{ \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg(reg)) { \ - if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \ - g_assert_not_reached(); \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ - /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while(0) - -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \ - if (((opc) != X86_ADD && (opc) != X86_SUB)) \ - g_assert_not_reached(); \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ - /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) - -#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) - -#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ - *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - amd64_membase_emit (inst, reg, basereg, disp); \ - amd64_codegen_post(inst); \ -} while (0) - -#define amd64_mov_regp_reg(inst,regp,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (regp)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_regp_emit ((inst), (reg), (regp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_mem_reg(inst,mem,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x88; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ - default: assert (0); \ - } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_reg_reg(inst,dreg,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ - switch ((size)) { \ - case 1: *(inst)++ = 
(unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_mov_reg_mem_body(inst,reg,mem,size) \ - do { \ - amd64_codegen_pre(inst); \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_address_byte ((inst), 0, (reg), 4); \ - x86_address_byte ((inst), 0, 4, 5); \ - x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#if defined(__default_codegen__) -#define amd64_mov_reg_mem(inst,reg,mem,size) \ - do { \ - amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ - } while (0) -#elif defined(__native_client_codegen__) -/* We have to re-base memory reads because memory isn't zero based. */ -#define amd64_mov_reg_mem(inst,reg,mem,size) \ - do { \ - amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \ - } while (0) -#endif /* __native_client_codegen__ */ - -#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ - do { \ - if ((size) == 2) \ - x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x8a; break; \ - case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - } while (0) - -#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ - do { \ - amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ - x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ - } while (0) - -#elif defined(__native_client_codegen__) - -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ - /* Add %r15 using LEA to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while(0) - -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ - /* Add %r15 */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ - } else { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ - switch ((size)) { \ - case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ - case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ - case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ - default: assert (0); \ - } \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_mem(inst,reg,mem) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,0); \ - *(inst)++ = (unsigned char)0x63; \ - x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,(basereg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsxd_reg_reg(inst,dreg,reg) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(dreg),0,(reg)); \ - *(inst)++ = (unsigned char)0x63; \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ - } while (0) - -/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of - * 32-bit immediate. Pepper with casts to avoid warnings. 
- */
-#define amd64_mov_reg_imm_size(inst,reg,imm,size) \
- do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex(inst, (size), 0, 0, (reg)); \
- *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
- if ((size) == 8) \
- x86_imm_emit64 ((inst), (guint64)(imm)); \
- else \
- x86_imm_emit32 ((inst), (int)(guint64)(imm)); \
- amd64_codegen_post(inst); \
- } while (0)
-
-#define amd64_mov_reg_imm(inst,reg,imm) \
- do { \
- int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \
- amd64_codegen_pre(inst); \
- amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \
- amd64_codegen_post(inst); \
- } while (0)
-
-#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8)
-
-#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg))
-
-#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \
- do { \
- amd64_codegen_pre(inst); \
- if ((size) == 2) \
- x86_prefix((inst), X86_OPERAND_PREFIX); \
- amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
- if ((size) == 1) { \
- *(inst)++ = (unsigned char)0xc6; \
- x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
- x86_imm_emit8 ((inst), (imm)); \
- } else if ((size) == 2) { \
- *(inst)++ = (unsigned char)0xc7; \
- x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
- x86_imm_emit16 ((inst), (imm)); \
- } else { \
- *(inst)++ = (unsigned char)0xc7; \
- x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
- x86_imm_emit32 ((inst), (imm)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-
-
-#define amd64_lea_membase_body(inst,reg,basereg,disp) \
- do { \
- amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \
- *(inst)++ = (unsigned char)0x8d; \
- amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
- } while (0)
-
-#if defined(__default_codegen__)
-#define amd64_lea_membase(inst,reg,basereg,disp) \
- amd64_lea_membase_body((inst), (reg), (basereg), (disp))
-#elif defined(__native_client_codegen__)
-/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
-/* 32-bit LEA and add R15 to the effective address */
-#define amd64_lea_membase(inst,reg,basereg,disp) \
- do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg(reg)) { \
- /* 32-bit LEA */ \
- amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \
- *(inst)++ = (unsigned char)0x8d; \
- amd64_membase_emit((inst), (reg), (basereg), (disp)); \
- /* Use a 64-bit LEA instead of an ADD to preserve flags */ \
- amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
- } else { \
- amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \
- } \
- amd64_codegen_post(inst); \
- } while (0)
-#endif /*__native_client_codegen__*/
-
-/* The instruction is implicitly 64-bit, so don't generate REX just for the size. */
-#define amd64_push_reg(inst,reg) \
- do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex(inst, 0, 0, 0, (reg)); \
- *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
- amd64_codegen_post(inst); \
- } while (0)
-
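A note on the width test in amd64_mov_reg_imm above: the 0xb8+rd opcode is essentially the only instruction with a true 64-bit immediate form, and its 32-bit form zero-extends into the full register, so the 5-byte encoding is enough whenever the value survives truncation to 32 bits. A hypothetical stand-alone equivalent (emit_mov_reg_imm64 is not from this header; this sketch keys the short form off zero-extension and, like the real x86_imm_emit helpers, assumes a little-endian host):

#include <string.h>
#include <stdint.h>

static unsigned char*
emit_mov_reg_imm64 (unsigned char *p, int reg, uint64_t imm)
{
	if (imm == (uint32_t)imm) {
		/* 5-byte mov r32, imm32; the 32-bit write zero-extends to 64 bits */
		uint32_t imm32 = (uint32_t)imm;
		if (reg > 7)
			*p++ = 0x41; /* REX.B for r8d-r15d */
		*p++ = (unsigned char)(0xb8 + (reg & 0x7));
		memcpy (p, &imm32, 4); p += 4;
	} else {
		/* 10-byte movabs: REX.W (plus REX.B if needed), then the full imm64 */
		*p++ = (unsigned char)(0x48 | ((reg > 7) ? 0x1 : 0x0));
		*p++ = (unsigned char)(0xb8 + (reg & 0x7));
		memcpy (p, &imm, 8); p += 8;
	}
	return p;
}

-/* The instruction is implicitly 64-bit, so don't generate REX just for the size. 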
*/ -#define amd64_push_membase(inst,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_pop_reg_body(inst,reg) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#if defined(__default_codegen__) - -#define amd64_call_reg(inst,reg) \ - do { \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ - } while (0) - - -#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) -#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) - -#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg)) - -#elif defined(__native_client_codegen__) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex ((inst),0,0,0,(reg)); \ - x86_jump_reg((inst),((reg)&0x7)); \ - amd64_codegen_post((inst)); \ - } while (0) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_mem_size(inst,mem,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \ - amd64_jump_reg_size((inst), AMD64_R11, 4); \ - amd64_codegen_post((inst)); \ - } while (0) - -#define amd64_call_reg_internal(inst,reg) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex((inst), 0, 0, 0, (reg)); \ - x86_call_reg((inst), ((reg) & 0x7)); \ - amd64_codegen_post((inst)); \ - } while (0) - -#define amd64_call_reg(inst,reg) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre(inst); \ - amd64_call_reg_internal((inst), (reg)); \ - amd64_call_sequence_post(inst); \ - amd64_codegen_post((inst)); \ - } while (0) - - -#define amd64_ret(inst) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_jump_reg_size((inst), AMD64_R11, 8); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_leave(inst) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_pop_reg(inst,reg) \ - do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ - amd64_pop_reg_body((inst), AMD64_R11); \ - amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - } else { \ - amd64_pop_reg_body((inst), (reg)); \ - } \ - amd64_codegen_post(inst); \ - } while (0) - -#endif /*__native_client_codegen__*/ - -#define amd64_movsd_reg_regp(inst,reg,regp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit 
((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_regp_reg(inst,regp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_reg_regp(inst,reg,regp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_regp_reg(inst,regp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (regp)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_reg_membase(inst,reg,basereg,disp) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x10; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf2); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_movss_membase_reg(inst,basereg,disp,reg) \ - do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), 0xf3); \ - amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)0x0f; \ - *(inst)++ = (unsigned char)0x11; \ - x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ - } while (0) - -/* The original inc_reg opcode is used as the REX prefix */ -#define amd64_inc_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),0,0,(reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst),0,(reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_dec_reg_size(inst,reg,size) \ - do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),0,0,(reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst),1,(reg) & 0x7); \ - amd64_codegen_post(inst); \ - } while (0) - -#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),0,0,0,(basereg)); \ - *(inst)++ = (is_double) ? 
(unsigned char)0xdd : (unsigned char)0xd9; \ - amd64_membase_emit ((inst), 0, (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#if defined (__default_codegen__) - -/* From the AMD64 Software Optimization Manual */ -#define amd64_padding_size(inst,size) \ - do { \ - switch ((size)) { \ - case 1: *(inst)++ = 0x90; break; \ - case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ - }; \ - } while (0) - -#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0) -#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) - -#define amd64_jump_code_size(inst,target,size) do { \ - if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ - x86_jump_code((inst),(target)); \ - } else { \ - amd64_jump_membase ((inst), AMD64_RIP, 0); \ - *(guint64*)(inst) = (guint64)(target); \ - (inst) += 8; \ - } \ -} while (0) - -#elif defined(__native_client_codegen__) - -/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */ -/* 64-bit Native Client because they load into rSP/rBP or use duplicate */ -/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */ -/* Intel64 and IA-32 Architectures Optimization Reference Manual and */ -/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */ - -#define amd64_padding_size(inst,size) \ - do { \ - unsigned char *code_start = (inst); \ - switch ((size)) { \ - /* xchg %eax,%eax, recognized by hardware as a NOP */ \ - case 1: *(inst)++ = 0x90; break; \ - /* xchg %ax,%ax */ \ - case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \ - break; \ - /* nop (%rax) */ \ - case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - *(inst)++ = 0x00; \ - break; \ - /* nop 0x0(%rax) */ \ - case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nop 0x0(%rax,%rax) */ \ - case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nopw 0x0(%rax,%rax) */ \ - case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \ - *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 1, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit8 ((inst), 0); \ - break; \ - /* nop 0x0(%rax) (32-bit displacement) */ \ - case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 2, 0, AMD64_RAX); \ - x86_imm_emit32((inst), 0); \ - break; \ - /* nop 0x0(%rax,%rax) (32-bit displacement) */ \ - case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \ - x86_address_byte ((inst), 2, 0, 4); \ - x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \ - x86_imm_emit32 ((inst), 0); \ - break; \ - default: \ - g_assert_not_reached(); \ - } \ - g_assert(code_start + (size) == (unsigned char *)(inst)); \ - } while (0) - - -/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ -#define amd64_call_membase_size(inst,basereg,disp,size) \ - do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre(inst); \ - amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ - 
amd64_call_reg_internal((inst), AMD64_R11); \ - amd64_call_sequence_post(inst); \ - amd64_codegen_post((inst)); \ - } while (0) - -/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_membase_size(inst,basereg,disp,size) \ - do { \ - amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \ - amd64_jump_reg_size((inst), AMD64_R11, 4); \ - } while (0) - -/* On Native Client we can't jump more than INT_MAX in either direction */ -#define amd64_jump_code_size(inst,target,size) \ - do { \ - /* x86_jump_code used twice in case of */ \ - /* relocation by amd64_codegen_post */ \ - guint8* jump_start; \ - amd64_codegen_pre(inst); \ - assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \ - x86_jump_code((inst),(target)); \ - inst = amd64_codegen_post(inst); \ - jump_start = (inst); \ - x86_jump_code((inst),(target)); \ - mono_amd64_patch(jump_start, (target)); \ -} while (0) - -#endif /*__native_client_codegen__*/ - -/* - * SSE - */ - -//TODO Reorganize SSE opcode defines. - -/* Two opcode SSE defines */ - -#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) - -#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ - amd64_codegen_pre(inst); \ - emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ - x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 
0 : (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -/* Three opcode SSE defines */ - -#define emit_opcode3(inst,op1,op2,op3) do { \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ -} while (0) - -#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \ - amd64_codegen_pre(inst); \ - *(inst)++ = (unsigned char)(op1); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0) - -#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \ - amd64_codegen_pre(inst); \ - emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \ - x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \ - amd64_codegen_post(inst); \ -} while (0) - -/* Four opcode SSE defines */ - -#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \ - amd64_codegen_pre(inst); \ - x86_prefix((inst), (unsigned char)(op1)); \ - amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op2); \ - *(inst)++ = (unsigned char)(op3); \ - *(inst)++ = (unsigned char)(op4); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ -} while (0) - -#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0) - -/* specific SSE opcode defines */ - -#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57) - -#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57) - -#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54) - -#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10) - -#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10) - -#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11) - -#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11) - -#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), 
(disp), 0xf3, 0x0f, 0x10) - -#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f) - -#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f) - -#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e) - -#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8) - -#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size)) - -#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size)) - -#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size)) - -#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8) - -#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a) - -#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a) - -#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58) - -#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c) - -#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59) - -#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e) - -#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51) - - -#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm)) - -#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm)) - - -#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) - - -#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) - -#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e) - -#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) - -#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) - -#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) - -#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d) - -#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm)) - -#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54) - -#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55) - -#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56) - -#define amd64_sse_xorps_reg_reg(inst,dreg,reg) 
emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57) - -#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51) - -#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52) - -#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53) - -#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0) - -#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c) - -#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d) - -#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16) - -#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12) - - -#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm)) - -#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm)) - -#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm)) - -#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm)) - -#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm)) - - -#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58) - -#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e) - -#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59) - -#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c) - -#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f) - -#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d) - -#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm)) - -#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54) - -#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55) - -#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56) - -#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51) - -#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52) - -#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53) - -#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0) - -#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c) - -#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d) - -#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12) - - -#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7) - - 
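A note on the helper naming used throughout this SSE block: the _op2 helpers emit plain 0F-map instructions with no mandatory prefix, while the three-opcode helpers emit a mandatory prefix byte (0x66, 0xF2 or 0xF3) first; the prefix has to precede any REX byte, which is why those emitters write op1 before calling amd64_emit_rex. For two concrete instructions from the defines above, the bytes come out as in this sketch (encodings assumed for illustration; registers below xmm8, so no REX byte is needed):

#include <stdio.h>

int main () {
	unsigned char xorps_xmm0_xmm1 [] = { 0x0f, 0x57, 0xc1 };       /* two-opcode form: bare 0F map */
	unsigned char pxor_xmm0_xmm1 []  = { 0x66, 0x0f, 0xef, 0xc1 }; /* three-opcode form: 0x66 prefix, then 0F map */
	unsigned int i;

	/* ModRM 0xc1 = mod 11, reg xmm0 (dest), rm xmm1 (src) */
	for (i = 0; i < sizeof (xorps_xmm0_xmm1); ++i)
		printf (".byte 0x%02x\n", xorps_xmm0_xmm1 [i]);
	for (i = 0; i < sizeof (pxor_xmm0_xmm1); ++i)
		printf (".byte 0x%02x\n", pxor_xmm0_xmm1 [i]);
	return 0;
}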
-#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb) - -#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb) - -#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef) - - -#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc) - -#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd) - -#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe) - -#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4) - - -#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8) - -#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9) - -#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa) - -#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb) - - -#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde) - -#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e) - -#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f) - - -#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c) - -#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee) - -#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d) - - -#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0) - -#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) - - -#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda) - -#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a) - -#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b) - - -#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38) - -#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea) - -#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39) - - -#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74) - -#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75) - -#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76) - -#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29) - - -#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64) - -#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65) - -#define 
amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
-
-#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
-
-
-#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
-
-
-#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
-
-#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
-
-#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
-
-#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
-
-#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
-
-#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
-
-
-#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
-
-#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
-
-#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
-
-#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
-
-#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
-
-#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
-
-
-#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
-
-#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
-
-#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
-
-#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
-
-
-#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
-
-#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
-
-#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
-
-#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
-
-
-#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
-
-#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
-
-#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
-
-#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
-
-
-#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
-
-#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
-
-#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
-
-#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
-
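The emit_sse_reg_reg_op4 cases in this group (pcmpgtq, packusdw, pmulld, and the pmin/pmax variants earlier) are the SSE4.1/SSE4.2 additions, which live behind the 0F 38 escape map rather than the plain 0F map; that extra escape byte is the whole reason the four-opcode helper exists. Sketched for one of them (encoding assumed for illustration; registers below xmm8, so no REX byte):

/* pmulld %xmm1, %xmm0 as the four-opcode helper would lay it out */
static const unsigned char pmulld_xmm0_xmm1 [] = {
	0x66,       /* mandatory prefix, written before any REX byte */
	0x0f, 0x38, /* escape into the 0F 38 opcode map */
	0x40,       /* pmulld */
	0xc1        /* ModRM: mod 11, reg xmm0 (dest), rm xmm1 (src) */
};

-#define 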
amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5) - - -#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm)) - -#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1) - - -#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm)) - -#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1) - - -#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm)) - -#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1) - - -#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm)) - -#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2) - - -#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm)) - -#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2) - - -#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm)) - -#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2) - - -#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm)) - -#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3) - - -#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm)) - -#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3) - - -#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm)) - -#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3) - - -#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6) - -#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B) - -#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6) - -#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A) - -#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B) - -#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A) - -#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6) - -#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B) - - -#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size)) - -#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size)) - -#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) - - -#define 
amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) - -#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) - - -#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) - -#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) - -#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) - -#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) - -#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) - -#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) - -#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) - -/* Generated from x86-codegen.h */ - -#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) -#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0) -#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0) -#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0) -#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0) -#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0) -#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0) -#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0) -#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) -#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0) -#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) -//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex 
((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
-#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
-#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
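Every wrapper in this generated block follows the same shape: bracket the 32-bit x86_* emitter with amd64_emit_rex and the codegen_pre/post hooks, and hand the wrapped emitter (reg)&0x7, because the high bit of an extended register travels in the REX prefix instead of the ModRM byte. A rough sketch of what that REX computation amounts to (the real amd64_emit_rex is defined earlier in this header; emit_rex below is only a stand-in):

/* Stand-in for illustration: REX is 0100WRXB. W selects 64-bit operand
 * size; R, X and B supply the fourth bit of the ModRM reg field, the
 * SIB index, and the ModRM rm / SIB base (or opcode register). */
static unsigned char*
emit_rex (unsigned char *p, int width, int reg, int index, int rm_base)
{
	unsigned char rex = 0x40;
	if (width == 8)   rex |= 0x08; /* REX.W */
	if (reg > 7)      rex |= 0x04; /* REX.R */
	if (index > 7)    rex |= 0x02; /* REX.X */
	if (rm_base > 7)  rex |= 0x01; /* REX.B */
	/* A bare 0x40 matters only for byte access to SPL/BPL/SIL/DIL;
	 * otherwise the prefix is emitted just when some bit is set. */
	if (rex != 0x40)
		*p++ = rex;
	return p;
}

-#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); 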
amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
-#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
-#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
-#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
-//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0)
-#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
-#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
-//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
-#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
-//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#if !defined( __native_client_codegen__ )
-/* Defined above for Native Client, so they can be used in other macros */
-#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
-#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
-#endif
-#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
-#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
-#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0)
-#if defined(__default_codegen__)
-#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
-#elif defined(__native_client_codegen__)
-#define amd64_branch_size(inst,cond,target,is_signed,size) \
-	do { \
-		/* amd64_branch_size_body used twice in */ \
-		/* case of relocation by amd64_codegen_post */ \
-		guint8* branch_start; \
-		amd64_codegen_pre(inst); \
-		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
-		inst = amd64_codegen_post(inst); \
-		branch_start = inst; \
-		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
-		mono_amd64_patch(branch_start, (target)); \
-	} while (0)
-#endif
-
-#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
-#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
-//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
-
-#if defined(__default_codegen__)
-
-#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
-#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
-
-#elif defined(__native_client_codegen__)
-/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
-#define amd64_call_imm_size(inst,disp,size) \
-	do { \
-		amd64_codegen_pre((inst)); \
-		amd64_call_sequence_pre((inst)); \
-		x86_call_imm((inst),(disp)); \
-		amd64_call_sequence_post((inst)); \
-		amd64_codegen_post((inst)); \
-	} while (0)
-
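For context on the branch macros being removed here (and re-added in the new header below): a conditional branch is an opcode plus a displacement that is measured from the end of the instruction. A standalone sketch of the rel32 case follows; the helper name emit_jcc_rel32 is ours, not the header's.

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: a jcc rel32 is 0x0f 0x8N followed by a 4-byte
 * little-endian displacement counted from the END of the instruction,
 * so the target of a 6-byte branch emitted at "code" is code + 6 + disp. */
static void emit_jcc_rel32 (unsigned char **inst, unsigned char cc_opcode, unsigned char *target)
{
	unsigned char *code = *inst;
	int32_t disp = (int32_t)(target - (code + 6));
	*code++ = 0x0f;
	*code++ = cc_opcode;     /* e.g. 0x84 for je, 0x85 for jne */
	memcpy (code, &disp, 4); /* assumes a little-endian host */
	code += 4;
	*inst = code;
}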
-/* x86_call_code is called twice below, first so we can get the size of the */
-/* call sequence, and again so the exact offset from "inst" is used, since */
-/* the sequence could have moved from amd64_call_sequence_post. */
-/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_call_code_size(inst,target,size) \
-	do { \
-		amd64_codegen_pre((inst)); \
-		guint8* adjusted_start; \
-		guint8* call_start; \
-		amd64_call_sequence_pre((inst)); \
-		x86_call_code((inst),(target)); \
-		adjusted_start = amd64_call_sequence_post((inst)); \
-		call_start = adjusted_start; \
-		x86_call_code(adjusted_start, (target)); \
-		amd64_codegen_post((inst)); \
-		mono_amd64_patch(call_start, (target)); \
-	} while (0)
-
-#endif /*__native_client_codegen__*/
-
-//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
-#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
-#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0)
-//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0)
-#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0)
-//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0)
-#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0)
-#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0)
-#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0)
-
-
-
-
-#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8)
-#define amd64_cld(inst) amd64_cld_size(inst,8)
-#define amd64_stosb(inst) amd64_stosb_size(inst,8)
-#define amd64_stosl(inst) amd64_stosl_size(inst,8)
-#define amd64_stosd(inst) amd64_stosd_size(inst,8)
-#define amd64_movsb(inst) amd64_movsb_size(inst,8)
-#define amd64_movsl(inst) amd64_movsl_size(inst,8)
-#define amd64_movsd(inst) amd64_movsd_size(inst,8)
-#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8)
-#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8)
-#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
-#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8)
-#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
-#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size)
-#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size)
-#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size)
-#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size)
-#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8)
-#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8)
-#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8)
-#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8)
-#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8)
-#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8)
-#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8)
-#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8)
-#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8)
-#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8)
-#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8)
-#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8)
-#define amd64_nop(inst) amd64_nop_size(inst,8)
-//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8)
-#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8)
-#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
-#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8)
-#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
-//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8)
-#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
-#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8)
-#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
-#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8)
-#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8)
-#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8)
-#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8)
-#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8)
-#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8)
-#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8)
-#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8)
-#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8)
-#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8)
-#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8)
-#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8)
-#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8)
-#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8)
-#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8)
-#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8)
-#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8)
-#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8)
-#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8)
-#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8)
-#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8)
-#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8)
-#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8)
-#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8)
-#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8)
-#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8)
-#define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8)
-#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8)
-//#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size)
-//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size)
-//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size)
-#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size)
-//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size)
-//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size)
-//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size)
-#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size)
-#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8)
-//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8)
-#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size)
-//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size)
-#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size)
-#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8)
-//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8)
-#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8)
-#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8)
-#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8)
-#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8)
-#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8)
-#define amd64_cdq(inst) amd64_cdq_size(inst,8)
-#define amd64_wait(inst) amd64_wait_size(inst,8)
-#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8)
-#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8)
-#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8)
-#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8)
-#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8)
-#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8)
-#define amd64_fcompp(inst) amd64_fcompp_size(inst,8)
-#define amd64_fucompp(inst) amd64_fucompp_size(inst,8)
-#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8)
-#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8)
-#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8)
-#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8)
-#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8)
-#define amd64_fchs(inst) amd64_fchs_size(inst,8)
-#define amd64_frem(inst) amd64_frem_size(inst,8)
-#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8)
-#define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8)
-#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8)
-#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8)
-#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8)
-#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8)
-#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8)
-#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8)
-#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8)
-#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8)
-#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8)
-#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8)
-#define amd64_fldz(inst) amd64_fldz_size(inst,8)
-#define amd64_fld1(inst) amd64_fld1_size(inst,8)
-#define amd64_fldpi(inst) amd64_fldpi_size(inst,8)
-#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8)
-#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8)
-#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8)
-#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8)
-#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8)
-#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8)
-#define amd64_fstsw(inst) amd64_fstsw_size(inst,8)
-#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8)
-//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8)
-#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8)
-#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8)
-//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8)
-#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8)
-#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8)
-//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8)
-#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8)
-#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8)
-#define amd64_pushad(inst) amd64_pushad_size(inst,8)
-#define amd64_pushfd(inst) amd64_pushfd_size(inst,8)
-#define amd64_popad(inst) amd64_popad_size(inst,8)
-#define amd64_popfd(inst) amd64_popfd_size(inst,8)
-#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8)
-#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8)
-#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8)
-#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8)
-#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8)
-#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8)
-#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8)
-#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8)
-#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8)
-#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8)
-#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8)
-#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8)
-#define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8)
-#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8)
-#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8)
-#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8)
-#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
-#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8)
-//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8)
-#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8)
-#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8)
-#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8)
-//#define amd64_ret(inst) amd64_ret_size(inst,8)
-#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8)
-#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
-#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
-#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
-#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize)
-//#define amd64_leave(inst) amd64_leave_size(inst,8)
-#define amd64_sahf(inst) amd64_sahf_size(inst,8)
-#define amd64_fsin(inst) amd64_fsin_size(inst,8)
-#define amd64_fcos(inst) amd64_fcos_size(inst,8)
-#define amd64_fabs(inst) amd64_fabs_size(inst,8)
-#define amd64_ftst(inst) amd64_ftst_size(inst,8)
-#define amd64_fxam(inst) amd64_fxam_size(inst,8)
-#define amd64_fpatan(inst) amd64_fpatan_size(inst,8)
-#define amd64_fprem(inst) amd64_fprem_size(inst,8)
-#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
-#define amd64_frndint(inst) amd64_frndint_size(inst,8)
-#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
-#define amd64_fptan(inst) amd64_fptan_size(inst,8)
-#define amd64_padding(inst,size) amd64_padding_size(inst,size)
-#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
-#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)
-
-#endif // AMD64_H
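The un-suffixed names above simply pin the _size variants to an 8-byte operand, so code ported from the x86 emitter gets 64-bit operations by default. The same scheme reappears in the new x64 header added below; a hypothetical prologue written against it might look like this (a usage sketch, not code from the patch):

unsigned char code [64], *p = code;

amd64_push_reg (p, AMD64_RBP);                  /* push %rbp */
amd64_mov_reg_reg (p, AMD64_RBP, AMD64_RSP, 8); /* mov %rsp, %rbp */
amd64_alu_reg_imm (p, X86_SUB, AMD64_RSP, 16);  /* sub $16, %rsp (64-bit by default) */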
diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h
new file mode 100644
index 0000000..3c40d9d
--- /dev/null
+++ b/x64/x64-codegen.h
@@ -0,0 +1,1835 @@
+/*
+ * amd64-codegen.h: Macros for generating amd64 code
+ *
+ * Authors:
+ *   Paolo Molaro (lupus@ximian.com)
+ *   Intel Corporation (ORP Project)
+ *   Sergey Chaban (serge@wildwestsoftware.com)
+ *   Dietmar Maurer (dietmar@ximian.com)
+ *   Patrik Torstensson
+ *   Zalman Stern
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef AMD64_H
+#define AMD64_H
+
+#include <glib.h>
+
+typedef enum {
+	AMD64_RAX = 0,
+	AMD64_RCX = 1,
+	AMD64_RDX = 2,
+	AMD64_RBX = 3,
+	AMD64_RSP = 4,
+	AMD64_RBP = 5,
+	AMD64_RSI = 6,
+	AMD64_RDI = 7,
+	AMD64_R8 = 8,
+	AMD64_R9 = 9,
+	AMD64_R10 = 10,
+	AMD64_R11 = 11,
+	AMD64_R12 = 12,
+	AMD64_R13 = 13,
+	AMD64_R14 = 14,
+	AMD64_R15 = 15,
+	AMD64_RIP = 16,
+	AMD64_NREG
+} AMD64_Reg_No;
+
+typedef enum {
+	AMD64_XMM0 = 0,
+	AMD64_XMM1 = 1,
+	AMD64_XMM2 = 2,
+	AMD64_XMM3 = 3,
+	AMD64_XMM4 = 4,
+	AMD64_XMM5 = 5,
+	AMD64_XMM6 = 6,
+	AMD64_XMM7 = 7,
+	AMD64_XMM8 = 8,
+	AMD64_XMM9 = 9,
+	AMD64_XMM10 = 10,
+	AMD64_XMM11 = 11,
+	AMD64_XMM12 = 12,
+	AMD64_XMM13 = 13,
+	AMD64_XMM14 = 14,
+	AMD64_XMM15 = 15,
+	AMD64_XMM_NREG = 16,
+} AMD64_XMM_Reg_No;
+
+typedef enum
+{
+	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+	AMD64_REX_W = 8  /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} AMD64_REX_Bits;
+
+#if defined(__default_codegen__)
+
+#define amd64_codegen_pre(inst)
+#define amd64_codegen_post(inst)
+
+#elif defined(__native_client_codegen__)
+
+#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
+#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size. */
+/* These pre- and post-sequence hooks remedy this by aligning the call */
+/* sequence after we emit it, since we will know the exact size then. */
+#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define amd64_call_sequence_post(inst) \
+	(mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define amd64_is_valid_nacl_base(reg) \
+	((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
+	(reg) == AMD64_RBP || (reg) == AMD64_RSP)
+
+#endif /*__native_client_codegen__*/
+
+#ifdef TARGET_WIN32
+#define AMD64_ARG_REG1 AMD64_RCX
+#define AMD64_ARG_REG2 AMD64_RDX
+#define AMD64_ARG_REG3 AMD64_R8
+#define AMD64_ARG_REG4 AMD64_R9
+#else
+#define AMD64_ARG_REG1 AMD64_RDI
+#define AMD64_ARG_REG2 AMD64_RSI
+#define AMD64_ARG_REG3 AMD64_RDX
+#define AMD64_ARG_REG4 AMD64_RCX
+#endif
+
+#ifdef TARGET_WIN32
+#define AMD64_CALLEE_REGS ((1<
+
+#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits)))
+
+#if defined(__default_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+	{ \
+		unsigned char _amd64_rex_bits = \
+			(((width) > 4) ? AMD64_REX_W : 0) | \
+			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
+			(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+		if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+	} while (0)
+#elif defined(__native_client_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+	{ \
+		unsigned char _amd64_rex_bits = \
+			(((width) > 4) ? AMD64_REX_W : 0) | \
+			(((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+			(((reg_index) > 7) ? AMD64_REX_X : 0) | \
+			(((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+		amd64_nacl_tag_rex((inst)); \
+		if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+	} while (0)
+#endif
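To make the REX computation above concrete, here is a standalone restatement of the same bit logic with a couple of checked examples (rex_for is a local illustration, not part of the header):

#include <assert.h>

static unsigned char rex_for (int width, int reg_modrm, int reg_index, int reg_rm)
{
	unsigned char bits = (width > 4 ? 8 : 0)     /* AMD64_REX_W */
	                   | (reg_modrm > 7 ? 4 : 0) /* AMD64_REX_R */
	                   | (reg_index > 7 ? 2 : 0) /* AMD64_REX_X */
	                   | (reg_rm > 7 ? 1 : 0);   /* AMD64_REX_B */
	return (unsigned char)(0x40 | bits);
}

int main (void)
{
	assert (rex_for (8, 0, 0, 12) == 0x49); /* 64-bit op with %r12 as r/m: REX.W + REX.B */
	assert (rex_for (4, 9, 0, 0) == 0x44);  /* 32-bit op with %r9 in the reg field: REX.R */
	return 0;
}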
+
+typedef union {
+	guint64 val;
+	unsigned char b [8];
+} amd64_imm_buf;
+
+#include "../x86/x86-codegen.h"
+
+/* In 64 bit mode, all registers have a low byte subregister */
+#undef X86_IS_BYTE_REG
+#define X86_IS_BYTE_REG(reg) 1
+
+#define amd64_modrm_mod(modrm) ((modrm) >> 6)
+#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define amd64_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3)
+#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3)
+#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3)
+
+#define amd64_sib_scale(sib) ((sib) >> 6)
+#define amd64_sib_index(sib) (((sib) >> 3) & 0x7)
+#define amd64_sib_base(sib) ((sib) & 0x7)
+
+#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1))
+
+#define x86_imm_emit64(inst,imm) \
+	do { \
+		amd64_imm_buf imb; \
+		imb.val = (guint64) (imm); \
+		*(inst)++ = imb.b [0]; \
+		*(inst)++ = imb.b [1]; \
+		*(inst)++ = imb.b [2]; \
+		*(inst)++ = imb.b [3]; \
+		*(inst)++ = imb.b [4]; \
+		*(inst)++ = imb.b [5]; \
+		*(inst)++ = imb.b [6]; \
+		*(inst)++ = imb.b [7]; \
+	} while (0)
+
+#define amd64_membase_emit(inst,reg,basereg,disp) do { \
+	if ((basereg) == AMD64_RIP) { \
+		x86_address_byte ((inst), 0, (reg)&0x7, 5); \
+		x86_imm_emit32 ((inst), (disp)); \
+	} \
+	else \
+		x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \
+} while (0)
+
+#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \
+	do { \
+		if (x86_is_imm8((imm))) { \
+			amd64_emit_rex(inst, size, 0, 0, (reg)); \
+			*(inst)++ = (unsigned char)0x83; \
+			x86_reg_emit ((inst), (opc), (reg)); \
+			x86_imm_emit8 ((inst), (imm)); \
+		} else if ((reg) == AMD64_RAX) { \
+			amd64_emit_rex(inst, size, 0, 0, 0); \
+			*(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+			x86_imm_emit32 ((inst), (imm)); \
+		} else { \
+			amd64_emit_rex(inst, size, 0, 0, (reg)); \
+			*(inst)++ = (unsigned char)0x81; \
+			x86_reg_emit ((inst), (opc), (reg)); \
+			x86_imm_emit32 ((inst), (imm)); \
+		} \
+	} while (0)
+
+#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \
+	do { \
+		amd64_emit_rex(inst, size, (dreg), 0, (reg)); \
+		*(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+		x86_reg_emit ((inst), (dreg), (reg)); \
+	} while (0)
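The imm-size dispatch in amd64_alu_reg_imm_size_body above trades code size three ways: a sign-extended imm8 via 0x83, a short accumulator-only form via (opc<<3)+5, and the general 0x81 imm32 form. A sketch of the resulting lengths (our own accounting, REX byte included for 64-bit operands):

/* add $8,    %rcx -> REX 0x83 /0 ib : 4 bytes (sign-extended imm8)
 * add $1000, %rax -> REX 0x05 id    : 6 bytes (short RAX-only form)
 * add $1000, %rcx -> REX 0x81 /0 id : 7 bytes (general imm32 form) */
static int alu_reg_imm_len (int reg_is_rax, long imm)
{
	if (imm >= -128 && imm <= 127) return 4; /* REX + 0x83 + modrm + imm8 */
	if (reg_is_rax)                return 6; /* REX + (opc<<3)+5 + imm32 */
	return 7;                                /* REX + 0x81 + modrm + imm32 */
}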
+
+#if defined(__default_codegen__)
+
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+	amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
+
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+	amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size))
+
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not directly update RSP or RBP other than direct copies */
+/* between them. Instead the lower 4 bytes are updated and then added to R15 */
+#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP))
+
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg(reg)) { \
+			if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \
+				g_assert_not_reached(); \
+			amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \
+			/* Use LEA instead of ADD to preserve flags */ \
+			amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while(0)
+
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \
+			if (((opc) != X86_ADD && (opc) != X86_SUB)) \
+				g_assert_not_reached(); \
+			amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \
+			/* Use LEA instead of ADD to preserve flags */ \
+			amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
+#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
+
+#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+
+#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \
+		*(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+		amd64_membase_emit (inst, reg, basereg, disp); \
+		amd64_codegen_post(inst); \
+} while (0)
+
+#define amd64_mov_regp_reg(inst,regp,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (reg), 0, (regp)); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x88; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+		default: assert (0); \
+		} \
+		x86_regp_emit ((inst), (reg), (regp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x88; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+		default: assert (0); \
+		} \
+		x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_mov_mem_reg(inst,mem,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (reg), 0, 0); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x88; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+		default: assert (0); \
+		} \
+		x86_address_byte ((inst), 0, (reg), 4); \
+		x86_address_byte ((inst), 0, 4, 5); \
+		x86_imm_emit32 ((inst), (mem)); \
+		amd64_codegen_post(inst); \
+	} while (0)
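The mov emitters above all follow the same size dispatch: an 0x66 operand-size prefix for 2-byte moves, opcode 0x88 for single bytes, 0x89 otherwise, and REX.W added by amd64_emit_rex when the size is 8. A usage sketch (the byte comments are ours):

unsigned char buf [32], *p = buf;

amd64_mov_membase_reg (p, AMD64_RBP, -8, AMD64_RAX, 8); /* 48 89 45 f8  mov %rax, -8(%rbp) */
amd64_mov_membase_reg (p, AMD64_RBP, -8, AMD64_RAX, 4); /*    89 45 f8  mov %eax, -8(%rbp) */
amd64_mov_membase_reg (p, AMD64_RBP, -8, AMD64_RAX, 2); /* 66 89 45 f8  mov %ax, -8(%rbp)  */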
+
+#define amd64_mov_reg_reg(inst,dreg,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x8a; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+		default: assert (0); \
+		} \
+		x86_reg_emit ((inst), (dreg), (reg)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_mov_reg_mem_body(inst,reg,mem,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (reg), 0, 0); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x8a; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+		default: assert (0); \
+		} \
+		x86_address_byte ((inst), 0, (reg), 4); \
+		x86_address_byte ((inst), 0, 4, 5); \
+		x86_imm_emit32 ((inst), (mem)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#if defined(__default_codegen__)
+#define amd64_mov_reg_mem(inst,reg,mem,size) \
+	do { \
+		amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \
+	} while (0)
+#elif defined(__native_client_codegen__)
+/* We have to re-base memory reads because memory isn't zero based. */
+#define amd64_mov_reg_mem(inst,reg,mem,size) \
+	do { \
+		amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \
+	} while (0)
+#endif /* __native_client_codegen__ */
+
+#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \
+	do { \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x8a; break; \
+		case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+		default: assert (0); \
+		} \
+		amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \
+	do { \
+		amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \
+		x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \
+	} while (0)
+
+#if defined(__default_codegen__)
+
+#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+	amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size))
+#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
+	do { \
+		amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+	} while (0)
+
+#elif defined(__native_client_codegen__)
+
+#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((reg))) { \
+			/* Clear upper 32 bits with mov of size 4 */ \
+			amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \
+			/* Add %r15 using LEA to preserve flags */ \
+			amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while(0)
+
+#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((reg))) { \
+			/* Clear upper 32 bits with mov of size 4 */ \
+			amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \
+			/* Add %r15 */ \
+			amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
+#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+		switch ((size)) { \
+		case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \
+		case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \
+		case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+		default: assert (0); \
+		} \
+		x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsxd_reg_mem(inst,reg,mem) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst,8,(reg),0,0); \
+		*(inst)++ = (unsigned char)0x63; \
+		x86_mem_emit ((inst), ((reg)&0x7), (mem)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst,8,(reg),0,(basereg)); \
+		*(inst)++ = (unsigned char)0x63; \
+		x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsxd_reg_reg(inst,dreg,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst,8,(dreg),0,(reg)); \
+		*(inst)++ = (unsigned char)0x63; \
+		x86_reg_emit ((inst), (dreg), (reg)); \
+		amd64_codegen_post(inst); \
+	} while (0)
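A small usage sketch for the sign-extending loads above, e.g. when widening a 32-bit local before 64-bit pointer arithmetic (the byte comments are ours):

unsigned char buf [16], *p = buf;

amd64_movsxd_reg_membase (p, AMD64_RAX, AMD64_RBP, -4); /* 48 63 45 fc  movslq -4(%rbp), %rax */
amd64_movsxd_reg_reg (p, AMD64_RDX, AMD64_RAX);         /* 48 63 d0     movslq %eax, %rdx */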
+
+/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for the common case
+ * of a 32-bit immediate. Pepper with casts to avoid warnings.
+ */
+#define amd64_mov_reg_imm_size(inst,reg,imm,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, (size), 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
+		if ((size) == 8) \
+			x86_imm_emit64 ((inst), (guint64)(imm)); \
+		else \
+			x86_imm_emit32 ((inst), (int)(guint64)(imm)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_mov_reg_imm(inst,reg,imm) \
+	do { \
+		int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \
+		amd64_codegen_pre(inst); \
+		amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8)
+
+#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg))
+
+#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if ((size) == 2) \
+			x86_prefix((inst), X86_OPERAND_PREFIX); \
+		amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
+		if ((size) == 1) { \
+			*(inst)++ = (unsigned char)0xc6; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit8 ((inst), (imm)); \
+		} else if ((size) == 2) { \
+			*(inst)++ = (unsigned char)0xc7; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit16 ((inst), (imm)); \
+		} else { \
+			*(inst)++ = (unsigned char)0xc7; \
+			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+			x86_imm_emit32 ((inst), (imm)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+
+#define amd64_lea_membase_body(inst,reg,basereg,disp) \
+	do { \
+		amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x8d; \
+		amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+#if defined(__default_codegen__)
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+	amd64_lea_membase_body((inst), (reg), (basereg), (disp))
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
+/* 32-bit LEA and add R15 to the effective address */
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg(reg)) { \
+			/* 32-bit LEA */ \
+			amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \
+			*(inst)++ = (unsigned char)0x8d; \
+			amd64_membase_emit((inst), (reg), (basereg), (disp)); \
+			/* Use a 64-bit LEA instead of an ADD to preserve flags */ \
+			amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+		} else { \
+			amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+#endif /*__native_client_codegen__*/
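A note on the "LEA instead of ADD" trick used in the sandboxed paths above: LEA computes its sum in the address unit and never touches EFLAGS, so a pending condition survives the re-basing. A sketch (assuming the un-suffixed lea_memindex alias keeps the old header's shape):

unsigned char buf [32], *p = buf;

amd64_alu_reg_reg (p, X86_CMP, AMD64_RAX, AMD64_RDX);          /* cmp sets EFLAGS */
amd64_lea_memindex (p, AMD64_RBP, AMD64_RBP, 0, AMD64_R15, 0); /* rbp += r15, EFLAGS untouched */
/* a branch emitted next still tests the result of the cmp */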
+
+/* Instructions are implicitly 64-bit, so don't generate REX for just the size. */
+#define amd64_push_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+/* The instruction is implicitly 64-bit, so don't generate REX for just the size. */
+#define amd64_push_membase(inst,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (basereg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_pop_reg_body(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#if defined(__default_codegen__)
+
+#define amd64_call_reg(inst,reg) \
+	do { \
+		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
+	} while (0)
+
+
+#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg))
+
+#elif defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_reg_size(inst,reg,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex ((inst),0,0,0,(reg)); \
+		x86_jump_reg((inst),((reg)&0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_mem_size(inst,mem,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_mov_reg_mem((inst), AMD64_R11, (mem), 4); \
+		amd64_jump_reg_size((inst), AMD64_R11, 4); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg_internal(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		amd64_emit_rex((inst), 0, 0, 0, (reg)); \
+		x86_call_reg((inst), ((reg) & 0x7)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+#define amd64_call_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre(inst); \
+		amd64_call_reg_internal((inst), (reg)); \
+		amd64_call_sequence_post(inst); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+
+#define amd64_ret(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_jump_reg_size((inst), AMD64_R11, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_leave(inst) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
+		amd64_pop_reg_body((inst), AMD64_R11); \
+		amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
+		amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_pop_reg(inst,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		if (amd64_is_nacl_stack_reg((reg))) { \
+			amd64_pop_reg_body((inst), AMD64_R11); \
+			amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
+			amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+		} else { \
+			amd64_pop_reg_body((inst), (reg)); \
+		} \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
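The Native Client amd64_ret above never emits a bare 0xc3: the return address is popped into %r11, masked, re-based against %r15 and jumped to, because the validator rejects indirect control transfers through unmasked addresses. The masking arithmetic restated as plain C (the 32-byte bundle size is an assumption of this sketch):

#include <stdint.h>

static uint64_t nacl_return_target (uint32_t popped_addr, uint64_t r15_base)
{
	uint32_t bundled = popped_addr & ~31u; /* clear low bits: 32-byte bundles assumed */
	return r15_base + bundled;             /* re-base into the sandbox */
}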
+#define amd64_movsd_reg_regp(inst,reg,regp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf2); \
+		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x10; \
+		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsd_regp_reg(inst,regp,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf2); \
+		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x11; \
+		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movss_reg_regp(inst,reg,regp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf3); \
+		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x10; \
+		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movss_regp_reg(inst,regp,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf3); \
+		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x11; \
+		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf2); \
+		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x10; \
+		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movss_reg_membase(inst,reg,basereg,disp) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf3); \
+		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x10; \
+		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf2); \
+		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x11; \
+		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_movss_membase_reg(inst,basereg,disp,reg) \
+	do { \
+		amd64_codegen_pre(inst); \
+		x86_prefix((inst), 0xf3); \
+		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0x11; \
+		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+/* The original inc_reg opcode is used as the REX prefix */
+#define amd64_inc_reg_size(inst,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex ((inst),(size),0,0,(reg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_reg_emit ((inst),0,(reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
+#define amd64_dec_reg_size(inst,reg,size) \
+	do { \
+		amd64_codegen_pre(inst); \
+		amd64_emit_rex ((inst),(size),0,0,(reg)); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_reg_emit ((inst),1,(reg) & 0x7); \
+		amd64_codegen_post(inst); \
+	} while (0)
+
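These regp/membase movsd/movss helpers are enough for a scalar spill and reload; for example, moving a double from one stack slot to another through xmm0 (AMD64_XMM0 is assumed from the XMM register enum earlier in this header):

	unsigned char code [32], *p = code;

	amd64_movsd_reg_membase (p, AMD64_XMM0, AMD64_RSP, 8);    /* f2 (REX) 0f 10: load [rsp+8] */
	amd64_movsd_membase_reg (p, AMD64_RBP, -16, AMD64_XMM0);  /* f2 (REX) 0f 11: store [rbp-16] */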
+#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst),0,0,0,(basereg)); \
+	*(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+	amd64_membase_emit ((inst), 0, (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#if defined (__default_codegen__)
+
+/* From the AMD64 Software Optimization Manual */
+#define amd64_padding_size(inst,size) \
+	do { \
+		switch ((size)) { \
+		case 1: *(inst)++ = 0x90; break; \
+		case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+		case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+		default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
+		}; \
+	} while (0)
+
+#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
+#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
+
+#define amd64_jump_code_size(inst,target,size) do { \
+	if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \
+		x86_jump_code((inst),(target)); \
+	} else { \
+		amd64_jump_membase ((inst), AMD64_RIP, 0); \
+		*(guint64*)(inst) = (guint64)(target); \
+		(inst) += 8; \
+	} \
+} while (0)
+
+#elif defined(__native_client_codegen__)
+
+/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */
+/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
+/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
+/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
+/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
+
+#define amd64_padding_size(inst,size) \
+	do { \
+		unsigned char *code_start = (inst); \
+		switch ((size)) { \
+			/* xchg %eax,%eax, recognized by hardware as a NOP */ \
+			case 1: *(inst)++ = 0x90; break; \
+			/* xchg %ax,%ax */ \
+			case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
+				break; \
+			/* nop (%rax) */ \
+			case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				*(inst)++ = 0x00; \
+				break; \
+			/* nop 0x0(%rax) */ \
+			case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nop 0x0(%rax,%rax) */ \
+			case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nopw 0x0(%rax,%rax) */ \
+			case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
+				*(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 1, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit8 ((inst), 0); \
+				break; \
+			/* nop 0x0(%rax) (32-bit displacement) */ \
+			case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 2, 0, AMD64_RAX); \
+				x86_imm_emit32((inst), 0); \
+				break; \
+			/* nop 0x0(%rax,%rax) (32-bit displacement) */ \
+			case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+				x86_address_byte ((inst), 2, 0, 4); \
+				x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+				x86_imm_emit32 ((inst), 0); \
+				break; \
+			default: \
+				g_assert_not_reached(); \
+		} \
+		g_assert(code_start + (size) == (unsigned char *)(inst)); \
+	} while (0)
+
+
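Either amd64_padding_size variant handles at most 8 bytes per call, so larger pads have to be split by the caller. A sketch of aligning the output pointer to a 16-byte boundary (assuming, as in the x86 header, that x86_padding covers the small sizes the default branch delegates to it):

	unsigned char code [64], *p = code;
	int pad = (16 - ((size_t)p & 15)) & 15;

	while (pad > 0) {
		int chunk = pad > 8 ? 8 : pad;  /* never ask for more than one 8-byte NOP */
		amd64_padding_size (p, chunk);
		pad -= chunk;
	}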
+/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
+#define amd64_call_membase_size(inst,basereg,disp,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre(inst); \
+		amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+		amd64_call_reg_internal((inst), AMD64_R11); \
+		amd64_call_sequence_post(inst); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_membase_size(inst,basereg,disp,size) \
+	do { \
+		amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+		amd64_jump_reg_size((inst), AMD64_R11, 4); \
+	} while (0)
+
+/* On Native Client we can't jump more than INT_MAX in either direction */
+#define amd64_jump_code_size(inst,target,size) \
+	do { \
+		/* x86_jump_code used twice in case of */ \
+		/* relocation by amd64_codegen_post */ \
+		guint8* jump_start; \
+		amd64_codegen_pre(inst); \
+		assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
+		x86_jump_code((inst),(target)); \
+		inst = amd64_codegen_post(inst); \
+		jump_start = (inst); \
+		x86_jump_code((inst),(target)); \
+		mono_amd64_patch(jump_start, (target)); \
+} while (0)
+
+#endif /*__native_client_codegen__*/
+
+/*
+ * SSE
+ */
+
+//TODO Reorganize SSE opcode defines.
+
+/* Two opcode SSE defines */
+
+#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+	*(inst)++ = (unsigned char)(op1); \
+	*(inst)++ = (unsigned char)(op2); \
+	x86_reg_emit ((inst), (dreg), (reg)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0)
+
+#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \
+	amd64_codegen_pre(inst); \
+	emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
+	x86_imm_emit8 ((inst), (imm)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+	*(inst)++ = (unsigned char)(op1); \
+	*(inst)++ = (unsigned char)(op2); \
+	amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
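The op2 emitters cover instructions that are a bare 0f xx pair with no mandatory prefix; the named wrappers further down are thin aliases over them. For example, a packed single-precision add, both ways:

	unsigned char code [32], *p = code;

	emit_sse_reg_reg_op2 (p, AMD64_XMM0, AMD64_XMM1, 0x0f, 0x58);  /* addps xmm0, xmm1 */
	amd64_sse_addps_reg_reg (p, AMD64_XMM0, AMD64_XMM1);           /* same bytes via the wrapper defined below */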
+#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \
+	amd64_codegen_pre(inst); \
+	amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+	*(inst)++ = (unsigned char)(op1); \
+	*(inst)++ = (unsigned char)(op2); \
+	amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+/* Three opcode SSE defines */
+
+#define emit_opcode3(inst,op1,op2,op3) do { \
+	*(inst)++ = (unsigned char)(op1); \
+	*(inst)++ = (unsigned char)(op2); \
+	*(inst)++ = (unsigned char)(op3); \
+} while (0)
+
+#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \
+	amd64_codegen_pre(inst); \
+	*(inst)++ = (unsigned char)(op1); \
+	amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+	*(inst)++ = (unsigned char)(op2); \
+	*(inst)++ = (unsigned char)(op3); \
+	x86_reg_emit ((inst), (dreg), (reg)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
+
+#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
+	amd64_codegen_pre(inst); \
+	emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
+	x86_imm_emit8 ((inst), (imm)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), (unsigned char)(op1)); \
+	amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+	*(inst)++ = (unsigned char)(op2); \
+	*(inst)++ = (unsigned char)(op3); \
+	amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), (unsigned char)(op1)); \
+	amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+	*(inst)++ = (unsigned char)(op2); \
+	*(inst)++ = (unsigned char)(op3); \
+	amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+/* Four opcode SSE defines */
+
+#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
+	amd64_codegen_pre(inst); \
+	x86_prefix((inst), (unsigned char)(op1)); \
+	amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+	*(inst)++ = (unsigned char)(op2); \
+	*(inst)++ = (unsigned char)(op3); \
+	*(inst)++ = (unsigned char)(op4); \
+	x86_reg_emit ((inst), (dreg), (reg)); \
+	amd64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
+
+/* specific SSE opcode defines */
+
+#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
+
+#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
+
+#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
+
+#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
+
+#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
+
+#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
+
+#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
+
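The three-opcode emitters exist for instructions with a mandatory f2/f3/66 prefix; note that emit_sse_reg_reg_size writes the prefix before the REX byte, which is the ordering the decoder requires. For instance:

	unsigned char code [32], *p = code;

	amd64_sse_movsd_reg_reg (p, AMD64_XMM2, AMD64_XMM3);  /* f2 (REX) 0f 10 /r */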
+#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
+
+#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
+
+#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
+
+#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
+
+#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
+
+#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
+
+#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
+
+#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size))
+
+#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
+
+#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
+
+#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
+
+#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
+
+#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
+
+#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+
+#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+
+
+#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+
+#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+
+
+#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+
+
+#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
+
+#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
+
+#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
+
+#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
+
+#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+
+#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+
+#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
+
+#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
+
+#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
+
+#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
+
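Together these cover the usual scalar-double round trip; for example, converting an integer, accumulating, and truncating back (register names assumed from the enums earlier in this header):

	unsigned char code [64], *p = code;

	amd64_sse_cvtsi2sd_reg_reg (p, AMD64_XMM0, AMD64_RAX);   /* f2 REX.W 0f 2a: gint64 -> double */
	amd64_sse_addsd_reg_reg (p, AMD64_XMM0, AMD64_XMM1);     /* f2 0f 58 */
	amd64_sse_cvttsd2si_reg_reg (p, AMD64_RAX, AMD64_XMM0);  /* f2 REX.W 0f 2c: truncate back */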
+#define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
+
+#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
+
+#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
+
+#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
+
+#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
+
+#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
+
+#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
+
+#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
+
+#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
+
+
+#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
+
+#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
+
+#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
+
+#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm))
+
+#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm))
+
+
+#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
+
+#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
+
+#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
+
+#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
+
+#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
+
+#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
+
+#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+
+#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
+
+#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+
+#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
+
+#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+
+#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+
+#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+
+#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+
+#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+
+#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+
+#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+
+
+#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+
+
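The shuffle and mask-extraction forms take an xmm source but, in pmovmskb's case, a general-purpose destination; for example, broadcasting a lane and then collecting the byte sign bits:

	unsigned char code [32], *p = code;

	amd64_sse_pshufd_reg_reg_imm (p, AMD64_XMM0, AMD64_XMM1, 0x00);  /* splat the low 32-bit lane */
	amd64_sse_pmovmskb_reg_reg (p, AMD64_RAX, AMD64_XMM0);           /* 16 sign bits -> eax */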
+#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+
+#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+
+#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+
+
+#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+
+#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+
+#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+
+#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+
+
+#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+
+#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
+
+#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
+
+#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
+
+
+#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
+
+#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
+
+#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
+
+
+#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
+
+#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
+
+#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
+
+
+#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
+
+#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
+
+#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
+
+#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
+
+
+#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
+
+#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
+
+#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
+
+
+#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
+
+#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
+
+#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
+
+#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
+
+
+#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
+
+#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
+
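Note the split in the table above: the two-byte 0f forms (paddd, pmaxsw, pminub, pcmpeqd, ...) are plain SSE2, while everything routed through emit_sse_reg_reg_op4 (the 0f 38 xx encodings: pmaxud, pminsb, pcmpeqq, ...) is SSE4.1 and needs a CPUID guard on older processors. Emission itself looks the same either way:

	unsigned char code [32], *p = code;

	amd64_sse_paddd_reg_reg (p, AMD64_XMM0, AMD64_XMM1);   /* SSE2: 66 0f fe */
	amd64_sse_pmaxsd_reg_reg (p, AMD64_XMM0, AMD64_XMM2);  /* SSE4.1: 66 0f 38 3d */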
+#define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+
+#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+
+
+#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+
+
+#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+
+#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+
+#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+
+#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+
+#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+
+#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+
+
+#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+
+#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+
+#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+
+#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+
+#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+
+#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+
+
+#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+
+#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+
+#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+
+#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+
+
+#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+
+#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+
+#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+
+#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
+
+
+#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+
+#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+
+#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+
+#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+
+
+#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+
+#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+
+#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+
+#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+
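The interleaves pair naturally with pxor for widening; the classic zero-extension idiom for the low eight bytes of a register:

	unsigned char code [32], *p = code;

	amd64_sse_pxor_reg_reg (p, AMD64_XMM7, AMD64_XMM7);       /* xmm7 = 0 */
	amd64_sse_punpcklbw_reg_reg (p, AMD64_XMM0, AMD64_XMM7);  /* bytes -> zero-extended words */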
+#define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
+
+
+#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
+
+
+#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
+
+
+#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
+
+
+#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
+
+
+#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
+
+
+#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
+
+
+#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
+
+
+/* Warning: SSE2 has no packed 64-bit arithmetic shift; the "psraq" forms below */
+/* do not encode a real instruction (0x66 0x0f 0xe3 is actually pavgw). */
+#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
+
+
+#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
+
+#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
+
+#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
+
+#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
+
+#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
+
+#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
+
+#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
+
+#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
+
+
+#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
+
+#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
+
+#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
+
+
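In the immediate-shift forms the dreg slot is not a register: X86_SSE_SHL/SHR/SAR is the opcode extension that lands in the modrm reg field, and the shifted register goes in the rm field. So for a packed 32-bit shift:

	unsigned char code [32], *p = code;

	amd64_sse_psrld_reg_imm (p, AMD64_XMM3, 5);  /* 66 0f 72 /2 ib: logical right by 5, per lane */
	amd64_sse_pslld_reg_imm (p, AMD64_XMM3, 5);  /* 66 0f 72 /6 ib: left by 5 */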
+#define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+
+#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+
+
+#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+
+#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+
+#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+
+#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+
+#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+
+#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
+
+#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
+
+/* Generated from x86-codegen.h */
+
+#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
+#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0)
+#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
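The cmpxchg family implements compare-and-swap: the expected old value goes in rax, the replacement in the source register, and ZF reports success. A sketch (for SMP atomicity a lock prefix would have to be emitted first, e.g. via amd64_prefix_size; the 0xf0 byte is omitted here):

	unsigned char code [32], *p = code;

	/* if ([rbx] == rax) [rbx] = rcx; else rax = [rbx]; ZF set on success */
	amd64_cmpxchg_membase_reg_size (p, AMD64_RBX, 0, AMD64_RCX, 8);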
+#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
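Usage is uniform across this table: the explicit size argument picks byte/word/dword/qword and drives the REX.W decision. Comparing a register against zero, both ways (amd64_alu_reg_imm_size is one of the macros defined earlier in the header and only shown commented out above):

	unsigned char code [32], *p = code;

	amd64_alu_reg_imm_size (p, X86_CMP, AMD64_RDI, 0, 8);  /* cmp rdi, 0 */
	amd64_test_reg_reg_size (p, AMD64_RDI, AMD64_RDI, 8);  /* test rdi, rdi: same ZF, shorter */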
+#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
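The memindex forms encode a full SIB address, base + index*2^shift + disp; for example, a scaled 32-bit array store:

	unsigned char code [32], *p = code;

	/* ((guint32*)rsi)[rcx] = eax: shift 2 scales the index by 4 */
	amd64_mov_memindex_reg_size (p, AMD64_RSI, 0, AMD64_RCX, 2, AMD64_RAX, 4);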
+//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
+#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
+//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0)
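The x87 wrappers keep the stack discipline explicit in the arguments (is_double, pop_stack); a small load/add/store round trip, using macros defined above (X86_FADD is assumed from the x86 header):

	unsigned char code [64], *p = code;

	amd64_fld_membase_size (p, AMD64_RSP, 8, 1, 8);     /* push the double at [rsp+8] */
	amd64_fld1_size (p, 8);                             /* push 1.0 */
	amd64_fp_op_reg_size (p, X86_FADD, 1, 1, 8);        /* st(1) += st(0), pop */
	amd64_fst_membase_size (p, AMD64_RSP, 8, 1, 1, 8);  /* store the sum back, popping the stack */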
while (0) +//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) +#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) +//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) +#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) +#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0) +#if !defined( __native_client_codegen__ ) +/* Defined above for Native Client, so they can be used in other macros */ +#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); 
x86_jump_reg((inst),((reg)&0x7)); } while (0) +#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) +#endif +#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) +#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) +#if defined(__default_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) +#elif defined(__native_client_codegen__) +#define amd64_branch_size(inst,cond,target,is_signed,size) \ + do { \ + /* amd64_branch_size_body used twice in */ \ + /* case of relocation by amd64_codegen_post */ \ + guint8* branch_start; \ + amd64_codegen_pre(inst); \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + inst = amd64_codegen_post(inst); \ + branch_start = inst; \ + amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + mono_amd64_patch(branch_start, (target)); \ + } while (0) +#endif + +#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) +#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) +//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) + +#if defined(__default_codegen__) + +#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) +#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) + +#elif defined(__native_client_codegen__) +/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ +#define amd64_call_imm_size(inst,disp,size) \ + do { \ + amd64_codegen_pre((inst)); \ + amd64_call_sequence_pre((inst)); \ + x86_call_imm((inst),(disp)); \ + amd64_call_sequence_post((inst)); \ + amd64_codegen_post((inst)); \ + } while (0) + +/* x86_call_code is called twice below, first so we can get the size of the */ +/* call sequence, and again so the exact offset from "inst" is used, since */ +/* the sequence could have moved from 
amd64_call_sequence_post. */ +/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ +#define amd64_call_code_size(inst,target,size) \ + do { \ + amd64_codegen_pre((inst)); \ + guint8* adjusted_start; \ + guint8* call_start; \ + amd64_call_sequence_pre((inst)); \ + x86_call_code((inst),(target)); \ + adjusted_start = amd64_call_sequence_post((inst)); \ + call_start = adjusted_start; \ + x86_call_code(adjusted_start, (target)); \ + amd64_codegen_post((inst)); \ + mono_amd64_patch(call_start, (target)); \ + } while (0) + +#endif /*__native_client_codegen__*/ + +//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0) +#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) +#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) +#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0) +//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0) +#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0) +#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0) +#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0) +#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0) +//#define amd64_padding_size(inst,size) do { 
amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0) +#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0) +#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0) +#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0) + + + + +#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8) +#define amd64_cld(inst) amd64_cld_size(inst,8) +#define amd64_stosb(inst) amd64_stosb_size(inst,8) +#define amd64_stosl(inst) amd64_stosl_size(inst,8) +#define amd64_stosd(inst) amd64_stosd_size(inst,8) +#define amd64_movsb(inst) amd64_movsb_size(inst,8) +#define amd64_movsl(inst) amd64_movsl_size(inst,8) +#define amd64_movsd(inst) amd64_movsd_size(inst,8) +#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8) +#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8) +#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8) +#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8) +#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8) +#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size) +#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size) +#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size) +#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size) +#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8) +#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8) +#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8) +#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8) +#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8) +#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8) +#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8) +#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8) +#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8) +#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8) +#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8) +#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8) +#define amd64_nop(inst) amd64_nop_size(inst,8) +//#define 
amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8) +#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) +//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8) +#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) +#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8) +#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) +#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) +#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8) +#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8) +#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8) +#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8) +#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8) +#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8) +#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8) +#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) +#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) +#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) +#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) +#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) +#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) +#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) +#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) +#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) +#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) +#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) +#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) +#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) +#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) +#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) +#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) +#define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) +#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) +//#define amd64_mov_mem_reg(inst,mem,reg,size) 
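This alias table pins the unsuffixed names to a 64-bit default: each entry forwards to its _size counterpart with size fixed at 8, and the //-commented entries mark names the header already defines in full earlier on, where re-aliasing them would redefine the macro. A small sketch of what the default buys you (function name and buffer handling are assumptions):

    #include "amd64-codegen.h"   /* assumed include path */

    /* Same ALU opcode at two widths; the bare alias implies 64-bit. */
    static int
    emit_add_both_widths (unsigned char *buf)
    {
            unsigned char *p = buf;
            amd64_alu_reg_reg (p, X86_ADD, AMD64_RAX, AMD64_RBX);          /* 48 03 c3: addq %rbx,%rax */
            amd64_alu_reg_reg_size (p, X86_ADD, AMD64_RAX, AMD64_RBX, 4);  /* 03 c3:    addl %ebx,%eax */
            return p - buf;
    }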
amd64_mov_mem_reg_size(inst,mem,reg,size) +//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) +//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) +#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) +//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) +//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) +//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) +#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) +#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) +//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) +#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) +//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) +#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) +#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) +//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) +#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) +#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) +#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) +#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) +#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) +#define amd64_cdq(inst) amd64_cdq_size(inst,8) +#define amd64_wait(inst) amd64_wait_size(inst,8) +#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) +#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) +#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) +#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) +#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) +#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) +#define amd64_fcompp(inst) amd64_fcompp_size(inst,8) +#define amd64_fucompp(inst) amd64_fucompp_size(inst,8) +#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) +#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) +#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) +#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) +#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8) +#define amd64_fchs(inst) amd64_fchs_size(inst,8) +#define amd64_frem(inst) amd64_frem_size(inst,8) +#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) +#define amd64_fcomi(inst,index) 
amd64_fcomi_size(inst,index,8) +#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) +#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) +#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) +#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) +#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) +#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) +#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) +#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) +#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) +#define amd64_fldz(inst) amd64_fldz_size(inst,8) +#define amd64_fld1(inst) amd64_fld1_size(inst,8) +#define amd64_fldpi(inst) amd64_fldpi_size(inst,8) +#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) +#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) +#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) +#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) +#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) +#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) +#define amd64_fstsw(inst) amd64_fstsw_size(inst,8) +#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8) +//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) +#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) +#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) +//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) +#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) +#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) +//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) +#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) +#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) +#define amd64_pushad(inst) amd64_pushad_size(inst,8) +#define amd64_pushfd(inst) amd64_pushfd_size(inst,8) +#define amd64_popad(inst) amd64_popad_size(inst,8) +#define amd64_popfd(inst) amd64_popfd_size(inst,8) +#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) +#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) +#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) +#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) +#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) +#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) +#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) +#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) +#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) +#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) +#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8) +#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8) +#define amd64_branch(inst,cond,target,is_signed) 
amd64_branch_size(inst,cond,target,is_signed,8)
+#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8)
+#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8)
+#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8)
+#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
+#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8)
+//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8)
+#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8)
+#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8)
+#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8)
+//#define amd64_ret(inst) amd64_ret_size(inst,8)
+#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8)
+#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
+#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
+#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
+#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize)
+//#define amd64_leave(inst) amd64_leave_size(inst,8)
+#define amd64_sahf(inst) amd64_sahf_size(inst,8)
+#define amd64_fsin(inst) amd64_fsin_size(inst,8)
+#define amd64_fcos(inst) amd64_fcos_size(inst,8)
+#define amd64_fabs(inst) amd64_fabs_size(inst,8)
+#define amd64_ftst(inst) amd64_ftst_size(inst,8)
+#define amd64_fxam(inst) amd64_fxam_size(inst,8)
+#define amd64_fpatan(inst) amd64_fpatan_size(inst,8)
+#define amd64_fprem(inst) amd64_fprem_size(inst,8)
+#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
+#define amd64_frndint(inst) amd64_frndint_size(inst,8)
+#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
+#define amd64_fptan(inst) amd64_fptan_size(inst,8)
+#define amd64_padding(inst,size) amd64_padding_size(inst,size)
+#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
+#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)
+
+#endif // AMD64_H
-- 
cgit v1.1


From 825acc90bc2eb83aa5c1d8343c407e31e52baf78 Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Sat, 8 Nov 2014 12:29:06 +0200
Subject: Replace amd64 with x64

---
 x64/x64-codegen.h | 1934 ++++++++++++++++++++++++++---------------------------
 1 file changed, 967 insertions(+), 967 deletions(-)

diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h
index 3c40d9d..5bc438c 100644
--- a/x64/x64-codegen.h
+++ b/x64/x64-codegen.h
@@ -1,5 +1,5 @@
 /*
- * amd64-codegen.h: Macros for generating amd64 code
+ * x64-codegen.h: Macros for generating x86-64 code
 *
 * Authors:
 * Paolo Molaro (lupus@ximian.com)
@@ -13,156 +13,156 @@
 * Copyright (C) 2001, 2002 Ximian, Inc.
 */
-#ifndef AMD64_H
-#define AMD64_H
+#ifndef X64_H
+#define X64_H

 #include <glib.h>

 typedef enum {
-	AMD64_RAX = 0,
-	AMD64_RCX = 1,
-	AMD64_RDX = 2,
-	AMD64_RBX = 3,
-	AMD64_RSP = 4,
-	AMD64_RBP = 5,
-	AMD64_RSI = 6,
-	AMD64_RDI = 7,
-	AMD64_R8 = 8,
-	AMD64_R9 = 9,
-	AMD64_R10 = 10,
-	AMD64_R11 = 11,
-	AMD64_R12 = 12,
-	AMD64_R13 = 13,
-	AMD64_R14 = 14,
-	AMD64_R15 = 15,
-	AMD64_RIP = 16,
-	AMD64_NREG
-} AMD64_Reg_No;
+	X64_RAX = 0,
+	X64_RCX = 1,
+	X64_RDX = 2,
+	X64_RBX = 3,
+	X64_RSP = 4,
+	X64_RBP = 5,
+	X64_RSI = 6,
+	X64_RDI = 7,
+	X64_R8 = 8,
+	X64_R9 = 9,
+	X64_R10 = 10,
+	X64_R11 = 11,
+	X64_R12 = 12,
+	X64_R13 = 13,
+	X64_R14 = 14,
+	X64_R15 = 15,
+	X64_RIP = 16,
+	X64_NREG
+} X64_Reg_No;

 typedef enum {
-	AMD64_XMM0 = 0,
-	AMD64_XMM1 = 1,
-	AMD64_XMM2 = 2,
-	AMD64_XMM3 = 3,
-	AMD64_XMM4 = 4,
-	AMD64_XMM5 = 5,
-	AMD64_XMM6 = 6,
-	AMD64_XMM7 = 7,
-	AMD64_XMM8 = 8,
-	AMD64_XMM9 = 9,
-	AMD64_XMM10 = 10,
-	AMD64_XMM11 = 11,
-	AMD64_XMM12 = 12,
-	AMD64_XMM13 = 13,
-	AMD64_XMM14 = 14,
-	AMD64_XMM15 = 15,
-	AMD64_XMM_NREG = 16,
-} AMD64_XMM_Reg_No;
+	X64_XMM0 = 0,
+	X64_XMM1 = 1,
+	X64_XMM2 = 2,
+	X64_XMM3 = 3,
+	X64_XMM4 = 4,
+	X64_XMM5 = 5,
+	X64_XMM6 = 6,
+	X64_XMM7 = 7,
+	X64_XMM8 = 8,
+	X64_XMM9 = 9,
+	X64_XMM10 = 10,
+	X64_XMM11 = 11,
+	X64_XMM12 = 12,
+	X64_XMM13 = 13,
+	X64_XMM14 = 14,
+	X64_XMM15 = 15,
+	X64_XMM_NREG = 16,
+} X64_XMM_Reg_No;

 typedef enum {
-	AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
-	AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
-	AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
-	AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
-} AMD64_REX_Bits;
+	X64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+	X64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+	X64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+	X64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} X64_REX_Bits;

 #if defined(__default_codegen__)
-#define amd64_codegen_pre(inst)
-#define amd64_codegen_post(inst)
+#define x64_codegen_pre(inst)
+#define x64_codegen_post(inst)
 #elif defined(__native_client_codegen__)
-#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
-#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+#define x64_codegen_pre(inst) guint8* _codegen_start = (inst); x64_nacl_instruction_pre();
+#define x64_codegen_post(inst) (x64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);

 /* Because of rex prefixes, etc, call sequences are not constant size. */
 /* These pre- and post-sequence hooks remedy this by aligning the call */
 /* sequence after we emit it, since we will know the exact size then. */
-#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
-#define amd64_call_sequence_post(inst) \
+#define x64_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define x64_call_sequence_post(inst) \
 	(mono_nacl_align_call(&_code_start, &(inst)), _code_start);

 /* Native client can load/store using one of the following registers */
 /* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
 /* its upper 32 bits cleared and reference memory using r15 as the base.
*/ -#define amd64_is_valid_nacl_base(reg) \ - ((reg) == AMD64_RIP || (reg) == AMD64_R15 || \ - (reg) == AMD64_RBP || (reg) == AMD64_RSP) +#define x64_is_valid_nacl_base(reg) \ + ((reg) == X64_RIP || (reg) == X64_R15 || \ + (reg) == X64_RBP || (reg) == X64_RSP) #endif /*__native_client_codegen__*/ #ifdef TARGET_WIN32 -#define AMD64_ARG_REG1 AMD64_RCX -#define AMD64_ARG_REG2 AMD64_RDX -#define AMD64_ARG_REG3 AMD64_R8 -#define AMD64_ARG_REG4 AMD64_R9 +#define X64_ARG_REG1 X64_RCX +#define X64_ARG_REG2 X64_RDX +#define X64_ARG_REG3 X64_R8 +#define X64_ARG_REG4 X64_R9 #else -#define AMD64_ARG_REG1 AMD64_RDI -#define AMD64_ARG_REG2 AMD64_RSI -#define AMD64_ARG_REG3 AMD64_RDX -#define AMD64_ARG_REG4 AMD64_RCX +#define X64_ARG_REG1 X64_RDI +#define X64_ARG_REG2 X64_RSI +#define X64_ARG_REG3 X64_RDX +#define X64_ARG_REG4 X64_RCX #endif #ifdef TARGET_WIN32 -#define AMD64_CALLEE_REGS ((1< 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + unsigned char _x64_rex_bits = \ + (((width) > 4) ? X64_REX_W : 0) | \ + (((reg_modrm) > 7) ? X64_REX_R : 0) | \ + (((reg_index) > 7) ? X64_REX_X : 0) | \ + (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \ + if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \ } while (0) #elif defined(__native_client_codegen__) -#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ +#define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ { \ - unsigned char _amd64_rex_bits = \ - (((width) > 4) ? AMD64_REX_W : 0) | \ - (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \ - (((reg_index) > 7) ? AMD64_REX_X : 0) | \ - (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \ - amd64_nacl_tag_rex((inst)); \ - if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \ + unsigned char _x64_rex_bits = \ + (((width) > 4) ? X64_REX_W : 0) | \ + (((reg_modrm) > 7) ? X64_REX_R : 0) | \ + (((reg_index) > 7) ? X64_REX_X : 0) | \ + (((reg_rm_base_opcode) > 7) ? 
X64_REX_B : 0); \ + x64_nacl_tag_rex((inst)); \ + if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \ } while (0) #endif typedef union { guint64 val; unsigned char b [8]; -} amd64_imm_buf; +} x64_imm_buf; #include "../x86/x86-codegen.h" @@ -170,23 +170,23 @@ typedef union { #undef X86_IS_BYTE_REG #define X86_IS_BYTE_REG(reg) 1 -#define amd64_modrm_mod(modrm) ((modrm) >> 6) -#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) -#define amd64_modrm_rm(modrm) ((modrm) & 0x7) +#define x64_modrm_mod(modrm) ((modrm) >> 6) +#define x64_modrm_reg(modrm) (((modrm) >> 3) & 0x7) +#define x64_modrm_rm(modrm) ((modrm) & 0x7) -#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) -#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) -#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) +#define x64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3) +#define x64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3) +#define x64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3) -#define amd64_sib_scale(sib) ((sib) >> 6) -#define amd64_sib_index(sib) (((sib) >> 3) & 0x7) -#define amd64_sib_base(sib) ((sib) & 0x7) +#define x64_sib_scale(sib) ((sib) >> 6) +#define x64_sib_index(sib) (((sib) >> 3) & 0x7) +#define x64_sib_base(sib) ((sib) & 0x7) -#define amd64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) +#define x64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ do { \ - amd64_imm_buf imb; \ + x64_imm_buf imb; \ imb.val = (guint64) (imm); \ *(inst)++ = imb.b [0]; \ *(inst)++ = imb.b [1]; \ @@ -198,8 +198,8 @@ typedef union { *(inst)++ = imb.b [7]; \ } while (0) -#define amd64_membase_emit(inst,reg,basereg,disp) do { \ - if ((basereg) == AMD64_RIP) { \ +#define x64_membase_emit(inst,reg,basereg,disp) do { \ + if ((basereg) == X64_RIP) { \ x86_address_byte ((inst), 0, (reg)&0x7, 5); \ x86_imm_emit32 ((inst), (disp)); \ } \ @@ -207,126 +207,126 @@ typedef union { x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ } while (0) -#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ +#define x64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ do { \ if (x86_is_imm8((imm))) { \ - amd64_emit_rex(inst, size, 0, 0, (reg)); \ + x64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x83; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit8 ((inst), (imm)); \ - } else if ((reg) == AMD64_RAX) { \ - amd64_emit_rex(inst, size, 0, 0, 0); \ + } else if ((reg) == X64_RAX) { \ + x64_emit_rex(inst, size, 0, 0, 0); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \ x86_imm_emit32 ((inst), (imm)); \ } else { \ - amd64_emit_rex(inst, size, 0, 0, (reg)); \ + x64_emit_rex(inst, size, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x81; \ x86_reg_emit ((inst), (opc), (reg)); \ x86_imm_emit32 ((inst), (imm)); \ } \ } while (0) -#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ +#define x64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \ do { \ - amd64_emit_rex(inst, size, (dreg), 0, (reg)); \ + x64_emit_rex(inst, size, (dreg), 0, (reg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) #if defined(__default_codegen__) -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) +#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \ + x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - 
amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) +#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ + x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) #elif defined(__native_client_codegen__) /* NaCl modules may not directly update RSP or RBP other than direct copies */ /* between them. Instead the lower 4 bytes are updated and then added to R15 */ -#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP)) +#define x64_is_nacl_stack_reg(reg) (((reg) == X64_RSP) || ((reg) == X64_RBP)) -#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \ +#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \ do{ \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg(reg)) { \ + x64_codegen_pre(inst); \ + if (x64_is_nacl_stack_reg(reg)) { \ if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \ g_assert_not_reached(); \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ + x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \ /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \ } else { \ - amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ + x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while(0) -#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ +#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \ + x64_codegen_pre(inst); \ + if (x64_is_nacl_stack_reg((dreg)) && ((reg) != X64_R15)) { \ if (((opc) != X86_ADD && (opc) != X86_SUB)) \ g_assert_not_reached(); \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ + x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \ /* Use LEA instead of ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \ + x64_lea_memindex_size((inst), (dreg), (dreg), 0, X64_R15, 0, 8); \ } else { \ - amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ + x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #endif /*__native_client_codegen__*/ -#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) +#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) -#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) +#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8) -#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ +#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex ((inst),(size),(reg),0,(basereg)); \ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \ - amd64_membase_emit (inst, reg, basereg, disp); \ - amd64_codegen_post(inst); \ + x64_membase_emit (inst, reg, basereg, disp); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_regp_reg(inst,regp,reg,size) \ +#define x64_mov_regp_reg(inst,regp,reg,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 
(regp)); \ + x64_emit_rex(inst, (size), (reg), 0, (regp)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_regp_emit ((inst), (reg), (regp)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \ +#define x64_mov_membase_reg(inst,basereg,disp,reg,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + x64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_mem_reg(inst,mem,reg,size) \ +#define x64_mov_mem_reg(inst,mem,reg,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ + x64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x88; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \ @@ -335,30 +335,30 @@ typedef union { x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_reg_reg(inst,dreg,reg,size) \ +#define x64_mov_reg_reg(inst,dreg,reg,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \ + x64_emit_rex(inst, (size), (dreg), 0, (reg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_reg_mem_body(inst,reg,mem,size) \ +#define x64_mov_reg_mem_body(inst,reg,mem,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, 0); \ + x64_emit_rex(inst, (size), (reg), 0, 0); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ @@ -367,86 +367,86 @@ typedef union { x86_address_byte ((inst), 0, (reg), 4); \ x86_address_byte ((inst), 0, 4, 5); \ x86_imm_emit32 ((inst), (mem)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #if defined(__default_codegen__) -#define amd64_mov_reg_mem(inst,reg,mem,size) \ +#define x64_mov_reg_mem(inst,reg,mem,size) \ do { \ - amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ + x64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ } while (0) #elif defined(__native_client_codegen__) /* We have to re-base memory reads because memory isn't zero based. 
*/ -#define amd64_mov_reg_mem(inst,reg,mem,size) \ +#define x64_mov_reg_mem(inst,reg,mem,size) \ do { \ - amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \ + x64_mov_reg_membase((inst),(reg),X64_R15,(mem),(size)); \ } while (0) #endif /* __native_client_codegen__ */ -#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ +#define x64_mov_reg_membase_body(inst,reg,basereg,disp,size) \ do { \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + x64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x8a; break; \ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \ default: assert (0); \ } \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + x64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) -#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ +#define x64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ - amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ + x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \ } while (0) #if defined(__default_codegen__) -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ +#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + x64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) +#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ } while (0) #elif defined(__native_client_codegen__) -#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ +#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ + x64_codegen_pre(inst); \ + if (x64_is_nacl_stack_reg((reg))) { \ /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ + x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \ /* Add %r15 using LEA to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \ } else { \ - amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ + x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while(0) -#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \ +#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \ do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg((reg))) { \ + x64_codegen_pre(inst); \ + if (x64_is_nacl_stack_reg((reg))) { \ /* Clear upper 32 bits with mov of size 4 */ \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ + x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \ /* Add %r15 */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 
8); \ + x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \ } else { \ - amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #endif /*__native_client_codegen__*/ -#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \ +#define x64_movzx_reg_membase(inst,reg,basereg,disp,size) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst, (size), (reg), 0, (basereg)); \ switch ((size)) { \ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \ @@ -454,69 +454,69 @@ typedef union { default: assert (0); \ } \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_movsxd_reg_mem(inst,reg,mem) \ +#define x64_movsxd_reg_mem(inst,reg,mem) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,0); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst,8,(reg),0,0); \ *(inst)++ = (unsigned char)0x63; \ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \ +#define x64_movsxd_reg_membase(inst,reg,basereg,disp) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(reg),0,(basereg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst,8,(reg),0,(basereg)); \ *(inst)++ = (unsigned char)0x63; \ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_movsxd_reg_reg(inst,dreg,reg) \ +#define x64_movsxd_reg_reg(inst,dreg,reg) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst,8,(dreg),0,(reg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst,8,(dreg),0,(reg)); \ *(inst)++ = (unsigned char)0x63; \ x86_reg_emit ((inst), (dreg), (reg)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) /* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of * 32-bit immediate. Pepper with casts to avoid warnings. */ -#define amd64_mov_reg_imm_size(inst,reg,imm,size) \ +#define x64_mov_reg_imm_size(inst,reg,imm,size) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, (size), 0, 0, (reg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ if ((size) == 8) \ x86_imm_emit64 ((inst), (guint64)(imm)); \ else \ x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_mov_reg_imm(inst,reg,imm) \ +#define x64_mov_reg_imm(inst,reg,imm) \ do { \ - int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \ - amd64_codegen_pre(inst); \ - amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \ - amd64_codegen_post(inst); \ + int _x64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \ + x64_codegen_pre(inst); \ + x64_mov_reg_imm_size ((inst), (reg), (imm), (_x64_width_temp ? 
4 : 8)); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8) +#define x64_set_reg_template(inst,reg) x64_mov_reg_imm_size ((inst),(reg), 0, 8) -#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg)) +#define x64_set_template(inst,reg) x64_set_reg_template((inst),(reg)) -#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \ +#define x64_mov_membase_imm(inst,basereg,disp,imm,size) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((size) == 2) \ x86_prefix((inst), X86_OPERAND_PREFIX); \ - amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \ + x64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \ if ((size) == 1) { \ *(inst)++ = (unsigned char)0xc6; \ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ @@ -530,293 +530,293 @@ typedef union { x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \ x86_imm_emit32 ((inst), (imm)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_lea_membase_body(inst,reg,basereg,disp) \ +#define x64_lea_membase_body(inst,reg,basereg,disp) \ do { \ - amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \ + x64_emit_rex(inst, 8, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ - amd64_membase_emit ((inst), (reg), (basereg), (disp)); \ + x64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) #if defined(__default_codegen__) -#define amd64_lea_membase(inst,reg,basereg,disp) \ - amd64_lea_membase_body((inst), (reg), (basereg), (disp)) +#define x64_lea_membase(inst,reg,basereg,disp) \ + x64_lea_membase_body((inst), (reg), (basereg), (disp)) #elif defined(__native_client_codegen__) /* NaCl modules may not write directly into RSP/RBP. Instead, use a */ /* 32-bit LEA and add R15 to the effective address */ -#define amd64_lea_membase(inst,reg,basereg,disp) \ +#define x64_lea_membase(inst,reg,basereg,disp) \ do { \ - amd64_codegen_pre(inst); \ - if (amd64_is_nacl_stack_reg(reg)) { \ + x64_codegen_pre(inst); \ + if (x64_is_nacl_stack_reg(reg)) { \ /* 32-bit LEA */ \ - amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \ + x64_emit_rex((inst), 4, (reg), 0, (basereg)); \ *(inst)++ = (unsigned char)0x8d; \ - amd64_membase_emit((inst), (reg), (basereg), (disp)); \ + x64_membase_emit((inst), (reg), (basereg), (disp)); \ /* Use a 64-bit LEA instead of an ADD to preserve flags */ \ - amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \ + x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \ } else { \ - amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \ + x64_lea_membase_body((inst), (reg), (basereg), (disp)); \ } \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #endif /*__native_client_codegen__*/ /* Instruction are implicitly 64-bits so don't generate REX for just the size. */ -#define amd64_push_reg(inst,reg) \ +#define x64_push_reg(inst,reg) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) /* Instruction is implicitly 64-bits so don't generate REX for just the size. 
*/ -#define amd64_push_membase(inst,basereg,disp) \ +#define x64_push_membase(inst,basereg,disp) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (basereg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst, 0, 0, 0, (basereg)); \ *(inst)++ = (unsigned char)0xff; \ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) -#define amd64_pop_reg_body(inst,reg) \ +#define x64_pop_reg_body(inst,reg) \ do { \ - amd64_codegen_pre(inst); \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #if defined(__default_codegen__) -#define amd64_call_reg(inst,reg) \ +#define x64_call_reg(inst,reg) \ do { \ - amd64_emit_rex(inst, 0, 0, 0, (reg)); \ + x64_emit_rex(inst, 0, 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xff; \ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ } while (0) -#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) -#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) +#define x64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) +#define x64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) -#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg)) +#define x64_pop_reg(inst,reg) x64_pop_reg_body((inst), (reg)) #elif defined(__native_client_codegen__) /* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_reg_size(inst,reg,size) \ +#define x64_jump_reg_size(inst,reg,size) \ do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex ((inst),0,0,0,(reg)); \ + x64_codegen_pre((inst)); \ + x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ + x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \ + x64_emit_rex ((inst),0,0,0,(reg)); \ x86_jump_reg((inst),((reg)&0x7)); \ - amd64_codegen_post((inst)); \ + x64_codegen_post((inst)); \ } while (0) /* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_jump_mem_size(inst,mem,size) \ +#define x64_jump_mem_size(inst,mem,size) \ do { \ - amd64_codegen_pre((inst)); \ - amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \ - amd64_jump_reg_size((inst), AMD64_R11, 4); \ - amd64_codegen_post((inst)); \ + x64_codegen_pre((inst)); \ + x64_mov_reg_mem((inst), (mem), X64_R11, 4); \ + x64_jump_reg_size((inst), X64_R11, 4); \ + x64_codegen_post((inst)); \ } while (0) -#define amd64_call_reg_internal(inst,reg) \ +#define x64_call_reg_internal(inst,reg) \ do { \ - amd64_codegen_pre((inst)); \ - amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ - amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \ - amd64_emit_rex((inst), 0, 0, 0, (reg)); \ + x64_codegen_pre((inst)); \ + x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \ + x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \ + x64_emit_rex((inst), 0, 0, 0, (reg)); \ x86_call_reg((inst), ((reg) & 0x7)); \ - amd64_codegen_post((inst)); \ + x64_codegen_post((inst)); \ } while (0) -#define amd64_call_reg(inst,reg) \ +#define x64_call_reg(inst,reg) \ do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre(inst); \ - amd64_call_reg_internal((inst), (reg)); \ - 
- amd64_call_sequence_post(inst); \
- amd64_codegen_post((inst)); \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_call_reg_internal((inst), (reg)); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
} while (0)
-#define amd64_ret(inst) \
+#define x64_ret(inst) \
do { \
- amd64_codegen_pre(inst); \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_jump_reg_size((inst), AMD64_R11, 8); \
- amd64_codegen_post(inst); \
+ x64_codegen_pre(inst); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_jump_reg_size((inst), X64_R11, 8); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_leave(inst) \
+#define x64_leave(inst) \
do { \
- amd64_codegen_pre(inst); \
- amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
- amd64_codegen_post(inst); \
+ x64_codegen_pre(inst); \
+ x64_mov_reg_reg((inst), X64_RSP, X64_RBP, 8); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), X64_RBP, X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, X64_RBP, X64_R15, 8); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_pop_reg(inst,reg) \
+#define x64_pop_reg(inst,reg) \
do { \
- amd64_codegen_pre(inst); \
- if (amd64_is_nacl_stack_reg((reg))) { \
- amd64_pop_reg_body((inst), AMD64_R11); \
- amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
- amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), (reg), X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
} else { \
- amd64_pop_reg_body((inst), (reg)); \
+ x64_pop_reg_body((inst), (reg)); \
} \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#endif /*__native_client_codegen__*/
-#define amd64_movsd_reg_regp(inst,reg,regp) \
+#define x64_movsd_reg_regp(inst,reg,regp) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf2); \
- amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x10; \
x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movsd_regp_reg(inst,regp,reg) \
+#define x64_movsd_regp_reg(inst,regp,reg) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf2); \
- amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x11; \
x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movss_reg_regp(inst,reg,regp) \
+#define x64_movss_reg_regp(inst,reg,regp) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf3); \
- amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x10; \
x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movss_regp_reg(inst,regp,reg) \
+#define x64_movss_regp_reg(inst,regp,reg) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf3); \
- amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x11; \
x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \
+#define x64_movsd_reg_membase(inst,reg,basereg,disp) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf2); \
- amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x10; \
x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movss_reg_membase(inst,reg,basereg,disp) \
+#define x64_movss_reg_membase(inst,reg,basereg,disp) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf3); \
- amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x10; \
x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \
+#define x64_movsd_membase_reg(inst,basereg,disp,reg) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf2); \
- amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x11; \
x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_movss_membase_reg(inst,basereg,disp,reg) \
+#define x64_movss_membase_reg(inst,basereg,disp,reg) \
do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), 0xf3); \
- amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)0x0f; \
*(inst)++ = (unsigned char)0x11; \
x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
/* The original inc_reg opcode is used as the REX prefix */
-#define amd64_inc_reg_size(inst,reg,size) \
+#define x64_inc_reg_size(inst,reg,size) \
do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst),(size),0,0,(reg)); \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
*(inst)++ = (unsigned char)0xff; \
x86_reg_emit ((inst),0,(reg) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
-#define amd64_dec_reg_size(inst,reg,size) \
+#define x64_dec_reg_size(inst,reg,size) \
do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst),(size),0,0,(reg)); \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
*(inst)++ = (unsigned char)0xff; \
x86_reg_emit ((inst),1,(reg) & 0x7); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
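[Editor's note: a sketch, not part of the patch, of why inc/dec are emitted through 0xff here: on 32-bit x86 the single byte 0x40+reg encoded `inc reg', but long mode reclaims those sixteen bytes as REX prefixes, so the wrappers fall back to the ModRM forms ff /0 and ff /1. X64_R10 is assumed to exist in this header's register enum.]

static unsigned char *
sketch_inc_dec (unsigned char *code)
{
	x64_inc_reg_size (code, X64_RAX, 8);	/* 48 ff c0  inc %rax  */
	x64_dec_reg_size (code, X64_R10, 4);	/* 41 ff ca  dec %r10d */
	return code;
}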
-#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst),0,0,0,(basereg)); \
+#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),0,0,0,(basereg)); \
*(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
- amd64_membase_emit ((inst), 0, (basereg), (disp)); \
- amd64_codegen_post(inst); \
+ x64_membase_emit ((inst), 0, (basereg), (disp)); \
+ x64_codegen_post(inst); \
} while (0)
#if defined (__default_codegen__)
/* From the AMD64 Software Optimization Manual */
-#define amd64_padding_size(inst,size) \
+#define x64_padding_size(inst,size) \
do { \
switch ((size)) { \
case 1: *(inst)++ = 0x90; break; \
case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
- default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
+ default: x64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
}; \
} while (0)
-#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
-#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
+#define x64_call_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
+#define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
-#define amd64_jump_code_size(inst,target,size) do { \
- if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \
+#define x64_jump_code_size(inst,target,size) do { \
+ if (x64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \
x86_jump_code((inst),(target)); \
} else { \
- amd64_jump_membase ((inst), AMD64_RIP, 0); \
+ x64_jump_membase ((inst), X64_RIP, 0); \
*(guint64*)(inst) = (guint64)(target); \
(inst) += 8; \
} \
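[Editor's note: a sketch, not part of the patch, of the two paths in the default x64_jump_code_size above: a target within +/-2GB gets a plain rel32 jump, while a farther target gets `jmp *0(%rip)' (ff 25 00 00 00 00) followed inline by the absolute 64-bit address.]

static unsigned char *
sketch_jump (unsigned char *code, void *target)
{
	/* Emits either a 5-byte jmp rel32 or the 6-byte RIP-relative
	 * indirect jump plus an 8-byte address slot. */
	x64_jump_code_size (code, target, 8);
	return code;
}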
@@ -824,13 +824,13 @@ typedef union {
#elif defined(__native_client_codegen__)
-/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */
+/* The 3-7 byte NOP sequences in x64_padding_size below are all illegal in */
/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
-#define amd64_padding_size(inst,size) \
+#define x64_padding_size(inst,size) \
do { \
unsigned char *code_start = (inst); \
switch ((size)) { \
@@ -845,31 +845,31 @@ typedef union {
break; \
/* nop 0x0(%rax) */ \
case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 1, 0, AMD64_RAX); \
+ x86_address_byte ((inst), 1, 0, X64_RAX); \
x86_imm_emit8 ((inst), 0); \
break; \
/* nop 0x0(%rax,%rax) */ \
case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
x86_address_byte ((inst), 1, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
x86_imm_emit8 ((inst), 0); \
break; \
/* nopw 0x0(%rax,%rax) */ \
case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
*(inst)++ = 0x1f; \
x86_address_byte ((inst), 1, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
x86_imm_emit8 ((inst), 0); \
break; \
/* nop 0x0(%rax) (32-bit displacement) */ \
case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
- x86_address_byte ((inst), 2, 0, AMD64_RAX); \
+ x86_address_byte ((inst), 2, 0, X64_RAX); \
x86_imm_emit32((inst), 0); \
break; \
/* nop 0x0(%rax,%rax) (32-bit displacement) */ \
case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
x86_address_byte ((inst), 2, 0, 4); \
- x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
x86_imm_emit32 ((inst), 0); \
break; \
default: \
@@ -880,36 +880,36 @@ typedef union {
/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
-#define amd64_call_membase_size(inst,basereg,disp,size) \
+#define x64_call_membase_size(inst,basereg,disp,size) \
do { \
- amd64_codegen_pre((inst)); \
- amd64_call_sequence_pre(inst); \
- amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
- amd64_call_reg_internal((inst), AMD64_R11); \
- amd64_call_sequence_post(inst); \
- amd64_codegen_post((inst)); \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_call_reg_internal((inst), X64_R11); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
} while (0)
/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
-#define amd64_jump_membase_size(inst,basereg,disp,size) \
+#define x64_jump_membase_size(inst,basereg,disp,size) \
do { \
- amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
- amd64_jump_reg_size((inst), AMD64_R11, 4); \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_jump_reg_size((inst), X64_R11, 4); \
} while (0)
/* On Native Client we can't jump more than INT_MAX in either direction */
-#define amd64_jump_code_size(inst,target,size) \
+#define x64_jump_code_size(inst,target,size) \
do { \
/* x86_jump_code used twice in case of */ \
- /* relocation by amd64_codegen_post */ \
+ /* relocation by x64_codegen_post */ \
guint8* jump_start; \
- amd64_codegen_pre(inst); \
- assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
+ x64_codegen_pre(inst); \
+ assert(x64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
x86_jump_code((inst),(target)); \
- inst = amd64_codegen_post(inst); \
+ inst = x64_codegen_post(inst); \
jump_start = (inst); \
x86_jump_code((inst),(target)); \
- mono_amd64_patch(jump_start, (target)); \
+ mono_x64_patch(jump_start, (target)); \
} while (0)
#endif /*__native_client_codegen__*/
@@ -923,39 +923,39 @@ typedef union {
/* Two opcode SSE defines */
#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do
{ \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
*(inst)++ = (unsigned char)(op1); \
*(inst)++ = (unsigned char)(op2); \
x86_reg_emit ((inst), (dreg), (reg)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0)
#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
x86_imm_emit8 ((inst), (imm)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)(op1); \
*(inst)++ = (unsigned char)(op2); \
- amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
- amd64_codegen_post(inst); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \
- amd64_codegen_pre(inst); \
- amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
*(inst)++ = (unsigned char)(op1); \
*(inst)++ = (unsigned char)(op2); \
- amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
- amd64_codegen_post(inst); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
} while (0)
/* Three opcode SSE defines */
@@ -967,869 +967,869 @@ typedef union {
} while (0)
#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
*(inst)++ = (unsigned char)(op1); \
- amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
*(inst)++ = (unsigned char)(op2); \
*(inst)++ = (unsigned char)(op3); \
x86_reg_emit ((inst), (dreg), (reg)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
x86_imm_emit8 ((inst), (imm)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), (unsigned char)(op1)); \
- amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ x64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
*(inst)++ = (unsigned char)(op2); \
*(inst)++ = (unsigned char)(op3); \
- amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
- amd64_codegen_post(inst); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), (unsigned char)(op1)); \
- amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+ x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
*(inst)++ = (unsigned char)(op2); \
*(inst)++ = (unsigned char)(op3); \
- amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
- amd64_codegen_post(inst); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
} while (0)
/* Four opcode SSE defines */
#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
- amd64_codegen_pre(inst); \
+ x64_codegen_pre(inst); \
x86_prefix((inst), (unsigned char)(op1)); \
- amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ x64_emit_rex ((inst), size, (dreg), 0, (reg)); \
*(inst)++ = (unsigned char)(op2); \
*(inst)++ = (unsigned char)(op3); \
*(inst)++ = (unsigned char)(op4); \
x86_reg_emit ((inst), (dreg), (reg)); \
- amd64_codegen_post(inst); \
+ x64_codegen_post(inst); \
} while (0)
#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
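[Editor's note: a sketch, not part of the patch, of how the three-opcode emitters compose an SSE instruction: mandatory prefix, then REX when needed, then the 0x0f escape, opcode and ModRM. The XMM register constants are assumed to be defined elsewhere in this header.]

static unsigned char *
sketch_sse_addsd (unsigned char *code)
{
	/* Expected bytes: f2 0f 58 c1, i.e. addsd %xmm1, %xmm0. */
	x64_sse_addsd_reg_reg (code, X64_XMM0, X64_XMM1);
	return code;
}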
/* specific SSE opcode defines */
-#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
+#define x64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
-#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
+#define x64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
-#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
+#define x64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
-#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
+#define x64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
-#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
+#define x64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
-#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
+#define x64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
-#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
+#define x64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
-#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
+#define x64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
-#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
+#define x64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
-#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
+#define x64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
-#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
+#define x64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
-#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
+#define x64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
-#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
+#define x64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
-#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
+#define x64_sse_cvttsd2si_reg_reg(inst,dreg,reg) x64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
-#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
+#define x64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
-#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
+#define x64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) x64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
-#define amd64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size))
+#define x64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size))
-#define amd64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8)
+#define x64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) x64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8)
-#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
+#define x64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
-#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
+#define x64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
-#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
+#define x64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
-#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
+#define x64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
-#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
+#define x64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
-#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+#define x64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
-#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+#define x64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
-#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+#define x64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
-#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+#define x64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
-#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+#define x64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
-#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
+#define x64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
-#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
+#define x64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
-#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
+#define x64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
-#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
+#define x64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
-#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+#define x64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
-#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+#define x64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
-#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
+#define x64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
-#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
+#define x64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
-#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
+#define x64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
-#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
+#define x64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
-#define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
+#define x64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
-#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
+#define x64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
-#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
+#define x64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
-#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
+#define x64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
-#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
+#define x64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
-#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
+#define x64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
-#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
+#define x64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
-#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
+#define x64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
-#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
+#define x64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
-#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
+#define x64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
-#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
+#define x64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
-#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
+#define x64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
-#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm))
+#define x64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm))
-#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm))
+#define x64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm))
-#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
+#define x64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
-#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
+#define x64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
-#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
+#define x64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
-#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
+#define x64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
-#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
+#define x64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
-#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
+#define x64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
-#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+#define x64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
-#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
+#define x64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
-#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+#define x64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
-#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
+#define x64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
-#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+#define x64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
-#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+#define x64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
-#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+#define x64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
-#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+#define x64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
-#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+#define x64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
-#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+#define x64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
-#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+#define x64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
-#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+#define x64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
-#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+#define x64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
-#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+#define x64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
-#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+#define x64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
-#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+#define x64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
-#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+#define x64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
-#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+#define x64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
-#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+#define x64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
-#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+#define x64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
-#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
+#define x64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
-#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
+#define x64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
-#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
+#define x64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
-#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
+#define x64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
-#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
+#define x64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
-#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
+#define x64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
-#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
+#define x64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
-#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
+#define x64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
-#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
+#define x64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
-#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
+#define x64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
-#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+#define x64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
-#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
+#define x64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
-#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
+#define x64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
-#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
+#define x64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
-#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
+#define x64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
-#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
+#define x64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
-#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
+#define x64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
-#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
+#define x64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
-#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
+#define x64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
-#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
+#define x64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
-#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
+#define x64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
-#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
+#define x64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
-#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
+#define x64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
-#define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+#define x64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
-#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+#define x64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
-#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+#define x64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
-#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+#define x64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
-#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+#define x64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
-#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+#define x64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
-#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+#define x64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
-#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+#define x64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
-#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+#define x64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
-#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+#define x64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
-#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+#define x64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
-#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+#define x64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
-#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+#define x64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
-#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+#define x64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
-#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+#define x64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
-#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+#define x64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
-#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+#define x64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
-#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+#define x64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
-#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+#define x64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
-#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+#define x64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
-#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+#define x64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
-#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+#define x64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
-#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+#define x64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
-#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+#define x64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
-#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+#define x64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
-#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+#define x64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
-#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+#define x64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
-#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+#define x64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
-#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+#define x64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
-#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+#define x64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
-#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+#define x64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
-#define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
+#define x64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
-#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
+#define x64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
-#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
+#define x64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
-#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
+#define x64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
-#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
+#define x64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
-#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
+#define x64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
-#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
+#define x64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
-#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
+#define x64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
-#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
+#define x64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
-#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
+#define x64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
-#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
+#define x64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
-#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
+#define x64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
-#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
+#define x64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
-#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
+#define x64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
-#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
+#define x64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
-#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
+#define x64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
-#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+#define x64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
-#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
+#define x64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
-#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
+#define x64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
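[Editor's note: in the *_reg_imm shift forms above, the ModRM reg field is not a register at all: the wrappers pass the X86_SSE_SHR/SAR/SHL opcode extensions (/2, /4, /6) from x86-codegen.h in the dreg slot. A hypothetical expansion, assuming an X64_XMM2 constant:]

static unsigned char *
sketch_sse_shift_imm (unsigned char *code)
{
	/* Expected bytes: 66 0f 73 d2 03, i.e. psrlq $3, %xmm2. */
	x64_sse_psrlq_reg_imm (code, X64_XMM2, 3);
	return code;
}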
-#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
+#define x64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
-#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
+#define x64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
-#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
+#define x64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
-#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
+#define x64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
-#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
+#define x64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
-#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
+#define x64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
-#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
+#define x64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
-#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
+#define x64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
-#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
+#define x64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
-#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
+#define x64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
-#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
+#define x64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
-#define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+#define x64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
-#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+#define x64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
-#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+#define x64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
-#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+#define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
-#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+#define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
-#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+#define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
-#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+#define x64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
-#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
+#define x64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
-#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
+#define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
/* Generated from x86-codegen.h */
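[Editor's note: every *_size wrapper in the run below follows one recipe: emit a REX prefix computed from the operand size and any extended registers, then reuse the 32-bit x86 emitter with each register masked to its low three bits. A hypothetical expansion against the renamed API, assuming X64_R10 exists:]

static unsigned char *
sketch_size_wrapper (unsigned char *code)
{
	/* Expected bytes: 49 f7 d2, i.e. not %r10 (REX.W|REX.B, then f7 /2). */
	x64_not_reg_size (code, X64_R10, 8);
	return code;
}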
4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0) -//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); 
x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0) -#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) -//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define 
amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0) -#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0) -#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while 
(0) -#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 
4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0) -#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) -//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) -#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) -#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) -#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) -#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0) -#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0) -#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0) -#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) -#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0) -#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); 
x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) -#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0) -#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0) -//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0) -#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define 
amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) -#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) -#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0) -#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) -#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0) -#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0) -#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0) -#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0) -//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define 
amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0) -#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) -//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0) -#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0) -#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0) -#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0) -#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0) +#define x64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) +#define x64_cld_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_cld(inst); x64_codegen_post(inst); } while (0) +#define x64_stosb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); x64_codegen_post(inst); } while (0) +#define x64_stosl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); x64_codegen_post(inst); } while (0) +#define x64_stosd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); x64_codegen_post(inst); } while (0) +#define x64_movsb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); x64_codegen_post(inst); } while (0) +#define x64_movsl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); x64_codegen_post(inst); } while (0) +#define x64_movsd_size(inst,size) do { 
x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); x64_codegen_post(inst); } while (0) +#define x64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0) +#define x64_rdtsc_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); x64_codegen_post(inst); } while (0) +#define x64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_xchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_xchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_inc_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_inc_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +//#define x64_inc_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_dec_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_dec_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +//#define x64_dec_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_not_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_not_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_not_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_neg_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); 
x64_codegen_post(inst); } while (0) +#define x64_neg_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_neg_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_nop_size(inst,size) do { x64_codegen_pre(inst); x86_nop(inst); x64_codegen_post(inst); } while (0) +//#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0) +#define x64_alu_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0) +#define x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0) +#define x64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0) +#define x64_alu_mem_reg_size(inst,opc,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0) +//#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); x64_codegen_post(inst); } while (0) +#define x64_alu_reg_mem_size(inst,opc,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0) +//#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_test_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0) +#define x64_test_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); x64_codegen_post(inst); } while (0) +#define x64_test_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0) +#define x64_test_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); 
x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+/* the mem/membase shift forms take no register argument, so the REX register fields are left clear */
+#define x64_shift_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_size(inst,opc,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_mem_size(inst,opc,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_size(inst,opc,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_mul_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
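
The `x64_*_size` wrappers above all follow one pattern: bracket the original 32-bit `x86_*` emitter between `x64_codegen_pre`/`x64_codegen_post` (no-ops in the default codegen; bundle-alignment hooks under Native Client, as the later hunks suggest), and first emit a REX prefix through `x64_emit_rex (inst, width, reg, index, rm_base)`. The sketch below is a minimal illustration of how such a REX byte is assembled; it uses a hypothetical `rex_byte()` helper, not the real `x64_emit_rex` macro, whose exact emit/skip condition is not shown in this hunk.

#include <stdint.h>

/* Hypothetical helper, for illustration only: computes the REX prefix
 * 0100WRXB that the x64_*_size wrappers prepend to the 32-bit encoding. */
static inline uint8_t
rex_byte (int width, int modrm_reg, int sib_index, int rm_base)
{
	uint8_t rex = 0x40;		/* fixed 0100 high nibble */
	if (width == 8)
		rex |= 0x08;		/* REX.W: 64-bit operand size */
	if (modrm_reg >= 8)
		rex |= 0x04;		/* REX.R: extends ModRM.reg */
	if (sib_index >= 8)
		rex |= 0x02;		/* REX.X: extends SIB.index */
	if (rm_base >= 8)
		rex |= 0x01;		/* REX.B: extends ModRM.rm / SIB.base */
	return rex;			/* plain 0x40 means no prefix is required */
}

This is why every register handed to an `x86_*` macro is masked with `&0x7`: the fourth register bit travels in REX.R/X/B, so only the low three bits belong in the ModRM/SIB byte. It also explains the recurring `(size) == 8 ? 4 : (size)`: the x86 emitters only understand operand sizes 1, 2 and 4, and 64-bit width is already requested via REX.W, so the wrappers downgrade 8 to 4 to select the plain 32-bit opcode form. The `//`-commented definitions appear to be superseded by hand-written 64-bit variants elsewhere in the header (for instance, forms taking 64-bit immediates); that is an inference from the surrounding code, not something this hunk states.
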
+#define x64_imul_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_div_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mov_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_regp_reg_size(inst,regp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ?
4 : (size)); x64_codegen_post(inst); } while (0) +//#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_clear_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +//#define x64_mov_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0) +#define x64_mov_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +//#define x64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0) +#define x64_lea_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0) +//#define x64_lea_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0) +#define x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); x64_codegen_post(inst); } while (0) +#define x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); x64_codegen_post(inst); } while (0) +#define x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); x64_codegen_post(inst); } while (0) +#define x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); x64_codegen_post(inst); } while (0) +#define x64_cdq_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); x64_codegen_post(inst); } while (0) +#define x64_wait_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); 
x86_wait(inst); x64_codegen_post(inst); } while (0) +#define x64_fp_op_mem_size(inst,opc,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); x64_codegen_post(inst); } while (0) +#define x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0) +#define x64_fp_op_size(inst,opc,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); x64_codegen_post(inst); } while (0) +#define x64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); x64_codegen_post(inst); } while (0) +#define x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0) +#define x64_fstp_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fcompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); x64_codegen_post(inst); } while (0) +#define x64_fucompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); x64_codegen_post(inst); } while (0) +#define x64_fnstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); x64_codegen_post(inst); } while (0) +#define x64_fnstcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_fnstcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_fldcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_fldcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_fchs_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); x64_codegen_post(inst); } while (0) +#define x64_frem_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_frem(inst); x64_codegen_post(inst); } while (0) +#define x64_fxch_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fcomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fcomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fucomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fucomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); 
x64_codegen_post(inst); } while (0) +#define x64_fld_size(inst,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); x64_codegen_post(inst); } while (0) +//#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0) +#define x64_fld80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_fld80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_fild_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0) +#define x64_fild_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0) +#define x64_fld_reg_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); x64_codegen_post(inst); } while (0) +#define x64_fldz_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); x64_codegen_post(inst); } while (0) +#define x64_fld1_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); x64_codegen_post(inst); } while (0) +#define x64_fldpi_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); x64_codegen_post(inst); } while (0) +#define x64_fst_size(inst,mem,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0) +#define x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0) +#define x64_fst80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_fst80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_fist_pop_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0) +#define x64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0) +#define x64_fstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); x64_codegen_post(inst); } while (0) +#define x64_fist_membase_size(inst,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0) +//#define x64_push_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); 
x86_push_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_push_regp_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_push_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +//#define x64_push_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0) +#define x64_push_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); x64_codegen_post(inst); } while (0) +//#define x64_pop_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_pop_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); x64_codegen_post(inst); } while (0) +#define x64_pop_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) +#define x64_pushad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); x64_codegen_post(inst); } while (0) +#define x64_pushfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); x64_codegen_post(inst); } while (0) +#define x64_popad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); x64_codegen_post(inst); } while (0) +#define x64_popfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); x64_codegen_post(inst); } while (0) +#define x64_loop_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); x64_codegen_post(inst); } while (0) +#define x64_loope_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); x64_codegen_post(inst); } while (0) +#define x64_loopne_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); x64_codegen_post(inst); } while (0) +#define x64_jump32_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); x64_codegen_post(inst); } while (0) +#define x64_jump8_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); x64_codegen_post(inst); } while (0) #if !defined( __native_client_codegen__ ) /* Defined above for Native Client, so they can be used in other macros */ -#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) -#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) +#define x64_jump_reg_size(inst,reg,size) do { x64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } 
while (0) +#define x64_jump_mem_size(inst,mem,size) do { x64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #endif -#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) -#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) -#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0) +#define x64_jump_disp_size(inst,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); x64_codegen_post(inst); } while (0) +#define x64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) +#define x64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) +#define x64_branch_size_body(inst,cond,target,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); x64_codegen_post(inst); } while (0) #if defined(__default_codegen__) -#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) +#define x64_branch_size(inst,cond,target,is_signed,size) do { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) #elif defined(__native_client_codegen__) -#define amd64_branch_size(inst,cond,target,is_signed,size) \ +#define x64_branch_size(inst,cond,target,is_signed,size) \ do { \ - /* amd64_branch_size_body used twice in */ \ - /* case of relocation by amd64_codegen_post */ \ + /* x64_branch_size_body used twice in */ \ + /* case of relocation by x64_codegen_post */ \ guint8* branch_start; \ - amd64_codegen_pre(inst); \ - amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ - inst = amd64_codegen_post(inst); \ + x64_codegen_pre(inst); \ + x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + inst = x64_codegen_post(inst); \ branch_start = inst; \ - amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ - mono_amd64_patch(branch_start, (target)); \ + x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ + mono_x64_patch(branch_start, (target)); \ } while (0) #endif -#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0) -#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0) -//#define amd64_call_reg_size(inst,reg,size) do { 
amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0) +#define x64_branch_disp_size(inst,cond,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); x64_codegen_post(inst); } while (0) +#define x64_set_reg_size(inst,cond,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0) +#define x64_set_mem_size(inst,cond,mem,is_signed,size) do { x64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); x64_codegen_post(inst); } while (0) +#define x64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0) +//#define x64_call_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_call_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); x64_codegen_post(inst); } while (0) #if defined(__default_codegen__) -#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) -#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) +#define x64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) +#define x64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) #elif defined(__native_client_codegen__) /* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ -#define amd64_call_imm_size(inst,disp,size) \ +#define x64_call_imm_size(inst,disp,size) \ do { \ - amd64_codegen_pre((inst)); \ - amd64_call_sequence_pre((inst)); \ + x64_codegen_pre((inst)); \ + x64_call_sequence_pre((inst)); \ x86_call_imm((inst),(disp)); \ - amd64_call_sequence_post((inst)); \ - amd64_codegen_post((inst)); \ + x64_call_sequence_post((inst)); \ + x64_codegen_post((inst)); \ } while (0) /* x86_call_code is called twice below, first so we can get the size of the */ /* call sequence, and again so the exact offset from "inst" is used, since */ -/* the sequence could have moved from amd64_call_sequence_post. */ +/* the sequence could have moved from x64_call_sequence_post. 
*/ /* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ -#define amd64_call_code_size(inst,target,size) \ +#define x64_call_code_size(inst,target,size) \ do { \ - amd64_codegen_pre((inst)); \ + x64_codegen_pre((inst)); \ guint8* adjusted_start; \ guint8* call_start; \ - amd64_call_sequence_pre((inst)); \ + x64_call_sequence_pre((inst)); \ x86_call_code((inst),(target)); \ - adjusted_start = amd64_call_sequence_post((inst)); \ + adjusted_start = x64_call_sequence_post((inst)); \ call_start = adjusted_start; \ x86_call_code(adjusted_start, (target)); \ - amd64_codegen_post((inst)); \ - mono_amd64_patch(call_start, (target)); \ + x64_codegen_post((inst)); \ + mono_x64_patch(call_start, (target)); \ } while (0) #endif /*__native_client_codegen__*/ -//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0) -#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0) -#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0) -#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0) -#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0) -#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0) -//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0) -#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0) -#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0) -#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0) -#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); 
amd64_codegen_post(inst); } while (0) -#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0) -//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0) -#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0) -#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0) -#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0) -#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0) -#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0) - - - - -#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8) -#define amd64_cld(inst) amd64_cld_size(inst,8) -#define amd64_stosb(inst) amd64_stosb_size(inst,8) -#define amd64_stosl(inst) amd64_stosl_size(inst,8) -#define amd64_stosd(inst) amd64_stosd_size(inst,8) -#define amd64_movsb(inst) amd64_movsb_size(inst,8) -#define amd64_movsl(inst) amd64_movsl_size(inst,8) -#define amd64_movsd(inst) amd64_movsd_size(inst,8) -#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8) -#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8) -#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8) -#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8) -#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8) -#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size) -#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size) -#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) -#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size) -#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size) -#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) -#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8) -#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8) -#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8) -#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8) -#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8) -#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8) -#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8) -#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8) -#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8) -#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8) -#define 
amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8) -#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8) -#define amd64_nop(inst) amd64_nop_size(inst,8) -//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8) -#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8) -#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) -#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8) -#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) -//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8) -#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) -#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8) -#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) -#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8) -#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8) -#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8) -#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8) -#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8) -#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8) -#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8) -#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8) -#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) -#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8) -#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8) -#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8) -#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8) -#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) -#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8) -#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8) -#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8) -#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8) -#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8) -#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8) -#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8) -#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8) -#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) -#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) -#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) -#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8) -#define 
amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8) -#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8) -//#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size) -//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size) -//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) -#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) -//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size) -//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size) -//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) -#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) -#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8) -//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8) -#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size) -//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) -#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) -#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8) -//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8) -#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) -#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) -#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) -#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) -#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) -#define amd64_cdq(inst) amd64_cdq_size(inst,8) -#define amd64_wait(inst) amd64_wait_size(inst,8) -#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8) -#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) -#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8) -#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8) -#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) -#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8) -#define amd64_fcompp(inst) amd64_fcompp_size(inst,8) -#define amd64_fucompp(inst) amd64_fucompp_size(inst,8) -#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8) -#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8) -#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8) -#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8) -#define amd64_fldcw_membase(inst,basereg,disp) 
amd64_fldcw_membase_size(inst,basereg,disp,8) -#define amd64_fchs(inst) amd64_fchs_size(inst,8) -#define amd64_frem(inst) amd64_frem_size(inst,8) -#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8) -#define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8) -#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8) -#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8) -#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8) -#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8) -#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8) -#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8) -#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8) -#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8) -#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8) -#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8) -#define amd64_fldz(inst) amd64_fldz_size(inst,8) -#define amd64_fld1(inst) amd64_fld1_size(inst,8) -#define amd64_fldpi(inst) amd64_fldpi_size(inst,8) -#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8) -#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) -#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8) -#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8) -#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8) -#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8) -#define amd64_fstsw(inst) amd64_fstsw_size(inst,8) -#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8) -//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8) -#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8) -#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8) -//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8) -#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) -#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8) -//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8) -#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8) -#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8) -#define amd64_pushad(inst) amd64_pushad_size(inst,8) -#define amd64_pushfd(inst) amd64_pushfd_size(inst,8) -#define amd64_popad(inst) amd64_popad_size(inst,8) -#define amd64_popfd(inst) amd64_popfd_size(inst,8) -#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8) -#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8) -#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8) -#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8) -#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8) -#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8) -#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8) -#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8) -#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8) -#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8) 
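/*
 * Editor's aside — not part of the original patch. The run of deleted
 * amd64_* lines here and the x64_* block added further below are the
 * mechanical amd64_ -> x64_ rename of the unsized convenience layer: each
 * short name simply pins the operand width to 8 bytes. For example,
 * x64_not_reg (p, X64_RAX) is exactly x64_not_reg_size (p, X64_RAX, 8),
 * which emits 48 f7 d0 (not %rax).
 */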
-#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8) -#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8) -#define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8) -#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8) -#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8) -#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8) -#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8) -#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8) -//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8) -#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8) -#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8) -#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8) -//#define amd64_ret(inst) amd64_ret_size(inst,8) -#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8) -#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8) -#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8) -#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8) -#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize) -//#define amd64_leave(inst) amd64_leave_size(inst,8) -#define amd64_sahf(inst) amd64_sahf_size(inst,8) -#define amd64_fsin(inst) amd64_fsin_size(inst,8) -#define amd64_fcos(inst) amd64_fcos_size(inst,8) -#define amd64_fabs(inst) amd64_fabs_size(inst,8) -#define amd64_ftst(inst) amd64_ftst_size(inst,8) -#define amd64_fxam(inst) amd64_fxam_size(inst,8) -#define amd64_fpatan(inst) amd64_fpatan_size(inst,8) -#define amd64_fprem(inst) amd64_fprem_size(inst,8) -#define amd64_fprem1(inst) amd64_fprem1_size(inst,8) -#define amd64_frndint(inst) amd64_frndint_size(inst,8) -#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8) -#define amd64_fptan(inst) amd64_fptan_size(inst,8) -#define amd64_padding(inst,size) amd64_padding_size(inst,size) -#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8) -#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8) - -#endif // AMD64_H +//#define x64_ret_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); x64_codegen_post(inst); } while (0) +#define x64_ret_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); x64_codegen_post(inst); } while (0) +#define x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0) +#define x64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0) +#define x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0) 
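/*
 * Editor's aside — illustrative sketch, not part of the original patch.
 * The x64_*_size macros above all follow one recipe: bracket the original
 * 32-bit x86 emitter with x64_codegen_pre/post and prepend a REX prefix
 * for the requested operand width. Assuming X64_REX(bits) is
 * (0x40 | (bits)), as defined earlier in this header, a caller would see:
 *
 *     unsigned char buf[16], *p = buf;
 *     x64_ret_imm_size (p, 16, 8);
 *     // x64_emit_rex (p, 8, 0, 0, 0) emits 0x48 (REX.W), then
 *     // x86_ret_imm emits 0xc2 0x10 0x00 — buf holds: 48 c2 10 00
 */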
+#define x64_enter_size(inst,framesize) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); x64_codegen_post(inst); } while (0) +//#define x64_leave_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); x64_codegen_post(inst); } while (0) +#define x64_sahf_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); x64_codegen_post(inst); } while (0) +#define x64_fsin_size(inst,size) do { x64_codegen_pre(inst); x86_fsin(inst); x64_codegen_post(inst); } while (0) +#define x64_fcos_size(inst,size) do { x64_codegen_pre(inst); x86_fcos(inst); x64_codegen_post(inst); } while (0) +#define x64_fabs_size(inst,size) do { x64_codegen_pre(inst); x86_fabs(inst); x64_codegen_post(inst); } while (0) +#define x64_ftst_size(inst,size) do { x64_codegen_pre(inst); x86_ftst(inst); x64_codegen_post(inst); } while (0) +#define x64_fxam_size(inst,size) do { x64_codegen_pre(inst); x86_fxam(inst); x64_codegen_post(inst); } while (0) +#define x64_fpatan_size(inst,size) do { x64_codegen_pre(inst); x86_fpatan(inst); x64_codegen_post(inst); } while (0) +#define x64_fprem_size(inst,size) do { x64_codegen_pre(inst); x86_fprem(inst); x64_codegen_post(inst); } while (0) +#define x64_fprem1_size(inst,size) do { x64_codegen_pre(inst); x86_fprem1(inst); x64_codegen_post(inst); } while (0) +#define x64_frndint_size(inst,size) do { x64_codegen_pre(inst); x86_frndint(inst); x64_codegen_post(inst); } while (0) +#define x64_fsqrt_size(inst,size) do { x64_codegen_pre(inst); x86_fsqrt(inst); x64_codegen_post(inst); } while (0) +#define x64_fptan_size(inst,size) do { x64_codegen_pre(inst); x86_fptan(inst); x64_codegen_post(inst); } while (0) +//#define x64_padding_size(inst,size) do { x64_codegen_pre(inst); x86_padding((inst),(size)); x64_codegen_post(inst); } while (0) +#define x64_prolog_size(inst,frame_size,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); x64_codegen_post(inst); } while (0) +#define x64_epilog_size(inst,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); x64_codegen_post(inst); } while (0) +#define x64_xadd_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); x64_codegen_post(inst); } while (0) +#define x64_xadd_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); x64_codegen_post(inst); } while (0) +#define x64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); x64_codegen_post(inst); } while (0) + + + + +#define x64_breakpoint(inst) x64_breakpoint_size(inst,8) +#define x64_cld(inst) x64_cld_size(inst,8) +#define x64_stosb(inst) x64_stosb_size(inst,8) +#define x64_stosl(inst) x64_stosl_size(inst,8) +#define x64_stosd(inst) x64_stosd_size(inst,8) +#define x64_movsb(inst) x64_movsb_size(inst,8) +#define x64_movsl(inst) x64_movsl_size(inst,8) +#define x64_movsd(inst) x64_movsd_size(inst,8) +#define x64_prefix(inst,p) x64_prefix_size(inst,p,8) +#define x64_rdtsc(inst) x64_rdtsc_size(inst,8) +#define x64_cmpxchg_reg_reg(inst,dreg,reg) x64_cmpxchg_reg_reg_size(inst,dreg,reg,8) +#define x64_cmpxchg_mem_reg(inst,mem,reg) 
x64_cmpxchg_mem_reg_size(inst,mem,reg,8) +#define x64_cmpxchg_membase_reg(inst,basereg,disp,reg) x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8) +#define x64_xchg_reg_reg(inst,dreg,reg,size) x64_xchg_reg_reg_size(inst,dreg,reg,size) +#define x64_xchg_mem_reg(inst,mem,reg,size) x64_xchg_mem_reg_size(inst,mem,reg,size) +#define x64_xchg_membase_reg(inst,basereg,disp,reg,size) x64_xchg_membase_reg_size(inst,basereg,disp,reg,size) +#define x64_xadd_reg_reg(inst,dreg,reg,size) x64_xadd_reg_reg_size(inst,dreg,reg,size) +#define x64_xadd_mem_reg(inst,mem,reg,size) x64_xadd_mem_reg_size(inst,mem,reg,size) +#define x64_xadd_membase_reg(inst,basereg,disp,reg,size) x64_xadd_membase_reg_size(inst,basereg,disp,reg,size) +#define x64_inc_mem(inst,mem) x64_inc_mem_size(inst,mem,8) +#define x64_inc_membase(inst,basereg,disp) x64_inc_membase_size(inst,basereg,disp,8) +#define x64_inc_reg(inst,reg) x64_inc_reg_size(inst,reg,8) +#define x64_dec_mem(inst,mem) x64_dec_mem_size(inst,mem,8) +#define x64_dec_membase(inst,basereg,disp) x64_dec_membase_size(inst,basereg,disp,8) +#define x64_dec_reg(inst,reg) x64_dec_reg_size(inst,reg,8) +#define x64_not_mem(inst,mem) x64_not_mem_size(inst,mem,8) +#define x64_not_membase(inst,basereg,disp) x64_not_membase_size(inst,basereg,disp,8) +#define x64_not_reg(inst,reg) x64_not_reg_size(inst,reg,8) +#define x64_neg_mem(inst,mem) x64_neg_mem_size(inst,mem,8) +#define x64_neg_membase(inst,basereg,disp) x64_neg_membase_size(inst,basereg,disp,8) +#define x64_neg_reg(inst,reg) x64_neg_reg_size(inst,reg,8) +#define x64_nop(inst) x64_nop_size(inst,8) +//#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size(inst,opc,reg,imm,8) +#define x64_alu_mem_imm(inst,opc,mem,imm) x64_alu_mem_imm_size(inst,opc,mem,imm,8) +#define x64_alu_membase_imm(inst,opc,basereg,disp,imm) x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define x64_alu_mem_reg(inst,opc,mem,reg) x64_alu_mem_reg_size(inst,opc,mem,reg,8) +#define x64_alu_membase_reg(inst,opc,basereg,disp,reg) x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8) +//#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size(inst,opc,dreg,reg,8) +#define x64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8) +#define x64_alu_reg_mem(inst,opc,reg,mem) x64_alu_reg_mem_size(inst,opc,reg,mem,8) +#define x64_alu_reg_membase(inst,opc,reg,basereg,disp) x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8) +#define x64_test_reg_imm(inst,reg,imm) x64_test_reg_imm_size(inst,reg,imm,8) +#define x64_test_mem_imm(inst,mem,imm) x64_test_mem_imm_size(inst,mem,imm,8) +#define x64_test_membase_imm(inst,basereg,disp,imm) x64_test_membase_imm_size(inst,basereg,disp,imm,8) +#define x64_test_reg_reg(inst,dreg,reg) x64_test_reg_reg_size(inst,dreg,reg,8) +#define x64_test_mem_reg(inst,mem,reg) x64_test_mem_reg_size(inst,mem,reg,8) +#define x64_test_membase_reg(inst,basereg,disp,reg) x64_test_membase_reg_size(inst,basereg,disp,reg,8) +#define x64_shift_reg_imm(inst,opc,reg,imm) x64_shift_reg_imm_size(inst,opc,reg,imm,8) +#define x64_shift_mem_imm(inst,opc,mem,imm) x64_shift_mem_imm_size(inst,opc,mem,imm,8) +#define x64_shift_membase_imm(inst,opc,basereg,disp,imm) x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8) +#define x64_shift_reg(inst,opc,reg) x64_shift_reg_size(inst,opc,reg,8) +#define x64_shift_mem(inst,opc,mem) x64_shift_mem_size(inst,opc,mem,8) +#define x64_shift_membase(inst,opc,basereg,disp) x64_shift_membase_size(inst,opc,basereg,disp,8) +#define 
x64_shrd_reg(inst,dreg,reg) x64_shrd_reg_size(inst,dreg,reg,8) +#define x64_shrd_reg_imm(inst,dreg,reg,shamt) x64_shrd_reg_imm_size(inst,dreg,reg,shamt,8) +#define x64_shld_reg(inst,dreg,reg) x64_shld_reg_size(inst,dreg,reg,8) +#define x64_shld_reg_imm(inst,dreg,reg,shamt) x64_shld_reg_imm_size(inst,dreg,reg,shamt,8) +#define x64_mul_reg(inst,reg,is_signed) x64_mul_reg_size(inst,reg,is_signed,8) +#define x64_mul_mem(inst,mem,is_signed) x64_mul_mem_size(inst,mem,is_signed,8) +#define x64_mul_membase(inst,basereg,disp,is_signed) x64_mul_membase_size(inst,basereg,disp,is_signed,8) +#define x64_imul_reg_reg(inst,dreg,reg) x64_imul_reg_reg_size(inst,dreg,reg,8) +#define x64_imul_reg_mem(inst,reg,mem) x64_imul_reg_mem_size(inst,reg,mem,8) +#define x64_imul_reg_membase(inst,reg,basereg,disp) x64_imul_reg_membase_size(inst,reg,basereg,disp,8) +#define x64_imul_reg_reg_imm(inst,dreg,reg,imm) x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8) +#define x64_imul_reg_mem_imm(inst,reg,mem,imm) x64_imul_reg_mem_imm_size(inst,reg,mem,imm,8) +#define x64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8) +#define x64_div_reg(inst,reg,is_signed) x64_div_reg_size(inst,reg,is_signed,8) +#define x64_div_mem(inst,mem,is_signed) x64_div_mem_size(inst,mem,is_signed,8) +#define x64_div_membase(inst,basereg,disp,is_signed) x64_div_membase_size(inst,basereg,disp,is_signed,8) +//#define x64_mov_mem_reg(inst,mem,reg,size) x64_mov_mem_reg_size(inst,mem,reg,size) +//#define x64_mov_regp_reg(inst,regp,reg,size) x64_mov_regp_reg_size(inst,regp,reg,size) +//#define x64_mov_membase_reg(inst,basereg,disp,reg,size) x64_mov_membase_reg_size(inst,basereg,disp,reg,size) +#define x64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) +//#define x64_mov_reg_reg(inst,dreg,reg,size) x64_mov_reg_reg_size(inst,dreg,reg,size) +//#define x64_mov_reg_mem(inst,reg,mem,size) x64_mov_reg_mem_size(inst,reg,mem,size) +//#define x64_mov_reg_membase(inst,reg,basereg,disp,size) x64_mov_reg_membase_size(inst,reg,basereg,disp,size) +#define x64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) +#define x64_clear_reg(inst,reg) x64_clear_reg_size(inst,reg,8) +//#define x64_mov_reg_imm(inst,reg,imm) x64_mov_reg_imm_size(inst,reg,imm,8) +#define x64_mov_mem_imm(inst,mem,imm,size) x64_mov_mem_imm_size(inst,mem,imm,size) +//#define x64_mov_membase_imm(inst,basereg,disp,imm,size) x64_mov_membase_imm_size(inst,basereg,disp,imm,size) +#define x64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) +#define x64_lea_mem(inst,reg,mem) x64_lea_mem_size(inst,reg,mem,8) +//#define x64_lea_membase(inst,reg,basereg,disp) x64_lea_membase_size(inst,reg,basereg,disp,8) +#define x64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8) +#define x64_widen_reg(inst,dreg,reg,is_signed,is_half) x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8) +#define x64_widen_mem(inst,dreg,mem,is_signed,is_half) x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8) +#define x64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8) +#define x64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) 
x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8) +#define x64_cdq(inst) x64_cdq_size(inst,8) +#define x64_wait(inst) x64_wait_size(inst,8) +#define x64_fp_op_mem(inst,opc,mem,is_double) x64_fp_op_mem_size(inst,opc,mem,is_double,8) +#define x64_fp_op_membase(inst,opc,basereg,disp,is_double) x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8) +#define x64_fp_op(inst,opc,index) x64_fp_op_size(inst,opc,index,8) +#define x64_fp_op_reg(inst,opc,index,pop_stack) x64_fp_op_reg_size(inst,opc,index,pop_stack,8) +#define x64_fp_int_op_membase(inst,opc,basereg,disp,is_int) x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8) +#define x64_fstp(inst,index) x64_fstp_size(inst,index,8) +#define x64_fcompp(inst) x64_fcompp_size(inst,8) +#define x64_fucompp(inst) x64_fucompp_size(inst,8) +#define x64_fnstsw(inst) x64_fnstsw_size(inst,8) +#define x64_fnstcw(inst,mem) x64_fnstcw_size(inst,mem,8) +#define x64_fnstcw_membase(inst,basereg,disp) x64_fnstcw_membase_size(inst,basereg,disp,8) +#define x64_fldcw(inst,mem) x64_fldcw_size(inst,mem,8) +#define x64_fldcw_membase(inst,basereg,disp) x64_fldcw_membase_size(inst,basereg,disp,8) +#define x64_fchs(inst) x64_fchs_size(inst,8) +#define x64_frem(inst) x64_frem_size(inst,8) +#define x64_fxch(inst,index) x64_fxch_size(inst,index,8) +#define x64_fcomi(inst,index) x64_fcomi_size(inst,index,8) +#define x64_fcomip(inst,index) x64_fcomip_size(inst,index,8) +#define x64_fucomi(inst,index) x64_fucomi_size(inst,index,8) +#define x64_fucomip(inst,index) x64_fucomip_size(inst,index,8) +#define x64_fld(inst,mem,is_double) x64_fld_size(inst,mem,is_double,8) +#define x64_fld_membase(inst,basereg,disp,is_double) x64_fld_membase_size(inst,basereg,disp,is_double,8) +#define x64_fld80_mem(inst,mem) x64_fld80_mem_size(inst,mem,8) +#define x64_fld80_membase(inst,basereg,disp) x64_fld80_membase_size(inst,basereg,disp,8) +#define x64_fild(inst,mem,is_long) x64_fild_size(inst,mem,is_long,8) +#define x64_fild_membase(inst,basereg,disp,is_long) x64_fild_membase_size(inst,basereg,disp,is_long,8) +#define x64_fld_reg(inst,index) x64_fld_reg_size(inst,index,8) +#define x64_fldz(inst) x64_fldz_size(inst,8) +#define x64_fld1(inst) x64_fld1_size(inst,8) +#define x64_fldpi(inst) x64_fldpi_size(inst,8) +#define x64_fst(inst,mem,is_double,pop_stack) x64_fst_size(inst,mem,is_double,pop_stack,8) +#define x64_fst_membase(inst,basereg,disp,is_double,pop_stack) x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8) +#define x64_fst80_mem(inst,mem) x64_fst80_mem_size(inst,mem,8) +#define x64_fst80_membase(inst,basereg,disp) x64_fst80_membase_size(inst,basereg,disp,8) +#define x64_fist_pop(inst,mem,is_long) x64_fist_pop_size(inst,mem,is_long,8) +#define x64_fist_pop_membase(inst,basereg,disp,is_long) x64_fist_pop_membase_size(inst,basereg,disp,is_long,8) +#define x64_fstsw(inst) x64_fstsw_size(inst,8) +#define x64_fist_membase(inst,basereg,disp,is_int) x64_fist_membase_size(inst,basereg,disp,is_int,8) +//#define x64_push_reg(inst,reg) x64_push_reg_size(inst,reg,8) +#define x64_push_regp(inst,reg) x64_push_regp_size(inst,reg,8) +#define x64_push_mem(inst,mem) x64_push_mem_size(inst,mem,8) +//#define x64_push_membase(inst,basereg,disp) x64_push_membase_size(inst,basereg,disp,8) +#define x64_push_memindex(inst,basereg,disp,indexreg,shift) x64_push_memindex_size(inst,basereg,disp,indexreg,shift,8) +#define x64_push_imm(inst,imm) x64_push_imm_size(inst,imm,8) +//#define x64_pop_reg(inst,reg) x64_pop_reg_size(inst,reg,8) +#define x64_pop_mem(inst,mem) 
x64_pop_mem_size(inst,mem,8) +#define x64_pop_membase(inst,basereg,disp) x64_pop_membase_size(inst,basereg,disp,8) +#define x64_pushad(inst) x64_pushad_size(inst,8) +#define x64_pushfd(inst) x64_pushfd_size(inst,8) +#define x64_popad(inst) x64_popad_size(inst,8) +#define x64_popfd(inst) x64_popfd_size(inst,8) +#define x64_loop(inst,imm) x64_loop_size(inst,imm,8) +#define x64_loope(inst,imm) x64_loope_size(inst,imm,8) +#define x64_loopne(inst,imm) x64_loopne_size(inst,imm,8) +#define x64_jump32(inst,imm) x64_jump32_size(inst,imm,8) +#define x64_jump8(inst,imm) x64_jump8_size(inst,imm,8) +#define x64_jump_reg(inst,reg) x64_jump_reg_size(inst,reg,8) +#define x64_jump_mem(inst,mem) x64_jump_mem_size(inst,mem,8) +#define x64_jump_membase(inst,basereg,disp) x64_jump_membase_size(inst,basereg,disp,8) +#define x64_jump_code(inst,target) x64_jump_code_size(inst,target,8) +#define x64_jump_disp(inst,disp) x64_jump_disp_size(inst,disp,8) +#define x64_branch8(inst,cond,imm,is_signed) x64_branch8_size(inst,cond,imm,is_signed,8) +#define x64_branch32(inst,cond,imm,is_signed) x64_branch32_size(inst,cond,imm,is_signed,8) +#define x64_branch(inst,cond,target,is_signed) x64_branch_size(inst,cond,target,is_signed,8) +#define x64_branch_disp(inst,cond,disp,is_signed) x64_branch_disp_size(inst,cond,disp,is_signed,8) +#define x64_set_reg(inst,cond,reg,is_signed) x64_set_reg_size(inst,cond,reg,is_signed,8) +#define x64_set_mem(inst,cond,mem,is_signed) x64_set_mem_size(inst,cond,mem,is_signed,8) +#define x64_set_membase(inst,cond,basereg,disp,is_signed) x64_set_membase_size(inst,cond,basereg,disp,is_signed,8) +#define x64_call_imm(inst,disp) x64_call_imm_size(inst,disp,8) +//#define x64_call_reg(inst,reg) x64_call_reg_size(inst,reg,8) +#define x64_call_mem(inst,mem) x64_call_mem_size(inst,mem,8) +#define x64_call_membase(inst,basereg,disp) x64_call_membase_size(inst,basereg,disp,8) +#define x64_call_code(inst,target) x64_call_code_size(inst,target,8) +//#define x64_ret(inst) x64_ret_size(inst,8) +#define x64_ret_imm(inst,imm) x64_ret_imm_size(inst,imm,8) +#define x64_cmov_reg(inst,cond,is_signed,dreg,reg) x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8) +#define x64_cmov_mem(inst,cond,is_signed,reg,mem) x64_cmov_mem_size(inst,cond,is_signed,reg,mem,8) +#define x64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8) +#define x64_enter(inst,framesize) x64_enter_size(inst,framesize) +//#define x64_leave(inst) x64_leave_size(inst,8) +#define x64_sahf(inst) x64_sahf_size(inst,8) +#define x64_fsin(inst) x64_fsin_size(inst,8) +#define x64_fcos(inst) x64_fcos_size(inst,8) +#define x64_fabs(inst) x64_fabs_size(inst,8) +#define x64_ftst(inst) x64_ftst_size(inst,8) +#define x64_fxam(inst) x64_fxam_size(inst,8) +#define x64_fpatan(inst) x64_fpatan_size(inst,8) +#define x64_fprem(inst) x64_fprem_size(inst,8) +#define x64_fprem1(inst) x64_fprem1_size(inst,8) +#define x64_frndint(inst) x64_frndint_size(inst,8) +#define x64_fsqrt(inst) x64_fsqrt_size(inst,8) +#define x64_fptan(inst) x64_fptan_size(inst,8) +#define x64_padding(inst,size) x64_padding_size(inst,size) +#define x64_prolog(inst,frame,reg_mask) x64_prolog_size(inst,frame,reg_mask,8) +#define x64_epilog(inst,reg_mask) x64_epilog_size(inst,reg_mask,8) + +#endif // X64_H -- cgit v1.1 From 9527abbd5eb3166d290da4e3b8e16855fabdafaf Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 12:34:17 +0200 Subject: Replace glib.h with stdint.h for standard C99 --- x64/x64-codegen.h | 32 
++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 5bc438c..cd450c7 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -16,7 +16,7 @@ #ifndef X64_H #define X64_H -#include <glib.h> +#include <stdint.h> typedef enum { X64_RAX = 0, @@ -74,13 +74,13 @@ #elif defined(__native_client_codegen__) -#define x64_codegen_pre(inst) guint8* _codegen_start = (inst); x64_nacl_instruction_pre(); +#define x64_codegen_pre(inst) uint8_t* _codegen_start = (inst); x64_nacl_instruction_pre(); #define x64_codegen_post(inst) (x64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start); /* Because of rex prefixes, etc, call sequences are not constant size. */ /* These pre- and post-sequence hooks remedy this by aligning the call */ /* sequence after we emit it, since we will know the exact size then. */ -#define x64_call_sequence_pre(inst) guint8* _code_start = (inst); +#define x64_call_sequence_pre(inst) uint8_t* _code_start = (inst); #define x64_call_sequence_post(inst) \ (mono_nacl_align_call(&_code_start, &(inst)), _code_start); @@ -160,7 +160,7 @@ typedef enum #endif typedef union { - guint64 val; + uint64_t val; unsigned char b [8]; } x64_imm_buf; @@ -182,12 +182,12 @@ typedef union { #define x64_sib_index(sib) (((sib) >> 3) & 0x7) #define x64_sib_base(sib) ((sib) & 0x7) -#define x64_is_imm32(val) ((gint64)val >= -((gint64)1<<31) && (gint64)val <= (((gint64)1<<31)-1)) +#define x64_is_imm32(val) ((int64_t)val >= -((int64_t)1<<31) && (int64_t)val <= (((int64_t)1<<31)-1)) #define x86_imm_emit64(inst,imm) \ do { \ x64_imm_buf imb; \ - imb.val = (guint64) (imm); \ + imb.val = (uint64_t) (imm); \ *(inst)++ = imb.b [0]; \ *(inst)++ = imb.b [1]; \ *(inst)++ = imb.b [2]; \ @@ -493,15 +493,15 @@ typedef union { x64_emit_rex(inst, (size), 0, 0, (reg)); \ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \ if ((size) == 8) \ - x86_imm_emit64 ((inst), (guint64)(imm)); \ + x86_imm_emit64 ((inst), (uint64_t)(imm)); \ else \ - x86_imm_emit32 ((inst), (int)(guint64)(imm)); \ + x86_imm_emit32 ((inst), (int)(uint64_t)(imm)); \ x64_codegen_post(inst); \ } while (0) #define x64_mov_reg_imm(inst,reg,imm) \ do { \ - int _x64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \ + int _x64_width_temp = ((uint64_t)(imm) == (uint64_t)(int)(uint64_t)(imm)); \ x64_codegen_pre(inst); \ x64_mov_reg_imm_size ((inst), (reg), (imm), (_x64_width_temp ?
4 : 8)); \ x64_codegen_post(inst); \ @@ -813,11 +813,11 @@ typedef union { #define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) #define x64_jump_code_size(inst,target,size) do { \ - if (x64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \ + if (x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))) { \ x86_jump_code((inst),(target)); \ } else { \ x64_jump_membase ((inst), X64_RIP, 0); \ - *(guint64*)(inst) = (guint64)(target); \ + *(uint64_t*)(inst) = (uint64_t)(target); \ (inst) += 8; \ } \ } while (0) @@ -902,9 +902,9 @@ typedef union { do { \ /* x86_jump_code used twice in case of */ \ /* relocation by x64_codegen_post */ \ - guint8* jump_start; \ + uint8_t* jump_start; \ x64_codegen_pre(inst); \ - assert(x64_is_imm32 ((gint64)(target) - (gint64)(inst))); \ + assert(x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))); \ x86_jump_code((inst),(target)); \ inst = x64_codegen_post(inst); \ jump_start = (inst); \ @@ -1563,7 +1563,7 @@ typedef union { do { \ /* x64_branch_size_body used twice in */ \ /* case of relocation by x64_codegen_post */ \ - guint8* branch_start; \ + uint8_t* branch_start; \ x64_codegen_pre(inst); \ x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ inst = x64_codegen_post(inst); \ @@ -1603,8 +1603,8 @@ typedef union { #define x64_call_code_size(inst,target,size) \ do { \ x64_codegen_pre((inst)); \ - guint8* adjusted_start; \ - guint8* call_start; \ + uint8_t* adjusted_start; \ + uint8_t* call_start; \ x64_call_sequence_pre((inst)); \ x86_call_code((inst),(target)); \ adjusted_start = x64_call_sequence_post((inst)); \ -- cgit v1.1 From d1896e187eed7dbdd18ba34c0fe68025c678c18c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 19:02:43 +0200 Subject: If nothing is defined, then it's "__default_codegen__" --- x64/x64-codegen.h | 214 +++++++++++++++++++++++++++--------------------------- x86/x86-codegen.h | 13 ++-- 2 files changed, 114 insertions(+), 113 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index cd450c7..23bdcbd 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -16,8 +16,11 @@ #ifndef X64_H #define X64_H +#include "../x86/x86-codegen.h" + #include +/* x86-64 general purpose registers */ typedef enum { X64_RAX = 0, X64_RCX = 1, @@ -39,6 +42,7 @@ typedef enum { X64_NREG } X64_Reg_No; +/* x86-64 XMM registers */ typedef enum { X64_XMM0 = 0, X64_XMM1 = 1, @@ -67,12 +71,7 @@ typedef enum X64_REX_W = 8 /* Opeartion is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */ } X64_REX_Bits; -#if defined(__default_codegen__) - -#define x64_codegen_pre(inst) -#define x64_codegen_post(inst) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) #define x64_codegen_pre(inst) uint8_t* _codegen_start = (inst); x64_nacl_instruction_pre(); #define x64_codegen_post(inst) (x64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start); @@ -90,8 +89,12 @@ typedef enum #define x64_is_valid_nacl_base(reg) \ ((reg) == X64_RIP || (reg) == X64_R15 || \ (reg) == X64_RBP || (reg) == X64_RSP) +#else -#endif /*__native_client_codegen__*/ +#define x64_codegen_pre(inst) +#define x64_codegen_post(inst) + +#endif /* __native_client_codegen__ */ #ifdef TARGET_WIN32 #define X64_ARG_REG1 X64_RCX @@ -106,37 +109,38 @@ typedef enum #endif #ifdef TARGET_WIN32 -#define X64_CALLEE_REGS ((1< 7) ? X64_REX_R : 0) | \ (((reg_index) > 7) ? 
X64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \ + x64_nacl_tag_rex((inst)); \ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \ } while (0) -#elif defined(__native_client_codegen__) +#else #define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \ { \ unsigned char _x64_rex_bits = \ @@ -154,18 +159,15 @@ typedef enum (((reg_modrm) > 7) ? X64_REX_R : 0) | \ (((reg_index) > 7) ? X64_REX_X : 0) | \ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \ - x64_nacl_tag_rex((inst)); \ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \ } while (0) -#endif +#endif /* __native_client_codegen__ */ typedef union { uint64_t val; - unsigned char b [8]; + unsigned char b[8]; } x64_imm_buf; -#include "../x86/x86-codegen.h" - /* In 64 bit mode, all registers have a low byte subregister */ #undef X86_IS_BYTE_REG #define X86_IS_BYTE_REG(reg) 1 @@ -233,15 +235,7 @@ typedef union { x86_reg_emit ((inst), (dreg), (reg)); \ } while (0) -#if defined(__default_codegen__) - -#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \ - x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) - -#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ - x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) /* NaCl modules may not directly update RSP or RBP other than direct copies */ /* between them. Instead the lower 4 bytes are updated and then added to R15 */ #define x64_is_nacl_stack_reg(reg) (((reg) == X64_RSP) || ((reg) == X64_RBP)) @@ -276,6 +270,14 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#else + +#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \ + x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)) + +#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \ + x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)) + #endif /*__native_client_codegen__*/ #define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size((inst),(opc),(reg),(imm),8) @@ -370,16 +372,16 @@ typedef union { x64_codegen_post(inst); \ } while (0) -#if defined(__default_codegen__) +#if defined(__native_client_codegen__) +/* We have to re-base memory reads because memory isn't zero based. */ #define x64_mov_reg_mem(inst,reg,mem,size) \ do { \ - x64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ + x64_mov_reg_membase((inst),(reg),X64_R15,(mem),(size)); \ } while (0) -#elif defined(__native_client_codegen__) -/* We have to re-base memory reads because memory isn't zero based. */ +#else #define x64_mov_reg_mem(inst,reg,mem,size) \ do { \ - x64_mov_reg_membase((inst),(reg),X64_R15,(mem),(size)); \ + x64_mov_reg_mem_body((inst),(reg),(mem),(size)); \ } while (0) #endif /* __native_client_codegen__ */ @@ -402,16 +404,7 @@ typedef union { x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 
4 : (size)); \ } while (0) -#if defined(__default_codegen__) - -#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ - x64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) -#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \ - do { \ - x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ - } while (0) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) #define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ do { \ @@ -441,6 +434,15 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#else + +#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \ + x64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size)) +#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \ + do { \ + x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \ + } while (0) + #endif /*__native_client_codegen__*/ #define x64_movzx_reg_membase(inst,reg,basereg,disp,size) \ @@ -541,10 +543,7 @@ typedef union { x64_membase_emit ((inst), (reg), (basereg), (disp)); \ } while (0) -#if defined(__default_codegen__) -#define x64_lea_membase(inst,reg,basereg,disp) \ - x64_lea_membase_body((inst), (reg), (basereg), (disp)) -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) /* NaCl modules may not write directly into RSP/RBP. Instead, use a */ /* 32-bit LEA and add R15 to the effective address */ #define x64_lea_membase(inst,reg,basereg,disp) \ @@ -562,6 +561,9 @@ typedef union { } \ x64_codegen_post(inst); \ } while (0) +#else +#define x64_lea_membase(inst,reg,basereg,disp) \ + x64_lea_membase_body((inst), (reg), (basereg), (disp)) #endif /*__native_client_codegen__*/ /* Instruction are implicitly 64-bits so don't generate REX for just the size. 
*/ @@ -591,22 +593,7 @@ typedef union { x64_codegen_post(inst); \ } while (0) -#if defined(__default_codegen__) - -#define x64_call_reg(inst,reg) \ - do { \ - x64_emit_rex(inst, 0, 0, 0, (reg)); \ - *(inst)++ = (unsigned char)0xff; \ - x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ - } while (0) - - -#define x64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) -#define x64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) - -#define x64_pop_reg(inst,reg) x64_pop_reg_body((inst), (reg)) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) /* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */ #define x64_jump_reg_size(inst,reg,size) \ @@ -679,6 +666,21 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#else + +#define x64_call_reg(inst,reg) \ + do { \ + x64_emit_rex(inst, 0, 0, 0, (reg)); \ + *(inst)++ = (unsigned char)0xff; \ + x86_reg_emit ((inst), 2, ((reg) & 0x7)); \ + } while (0) + + +#define x64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0) +#define x64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0) + +#define x64_pop_reg(inst,reg) x64_pop_reg_body((inst), (reg)) + #endif /*__native_client_codegen__*/ #define x64_movsd_reg_regp(inst,reg,regp) \ @@ -796,33 +798,7 @@ typedef union { x64_codegen_post(inst); \ } while (0) -#if defined (__default_codegen__) - -/* From the AMD64 Software Optimization Manual */ -#define x64_padding_size(inst,size) \ - do { \ - switch ((size)) { \ - case 1: *(inst)++ = 0x90; break; \ - case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ - default: x64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ - }; \ - } while (0) - -#define x64_call_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst),2, (basereg),(disp)); } while (0) -#define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) - -#define x64_jump_code_size(inst,target,size) do { \ - if (x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))) { \ - x86_jump_code((inst),(target)); \ - } else { \ - x64_jump_membase ((inst), X64_RIP, 0); \ - *(uint64_t*)(inst) = (uint64_t)(target); \ - (inst) += 8; \ - } \ -} while (0) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) /* The 3-7 byte NOP sequences in x64_padding_size below are all illegal in */ /* 64-bit Native Client because they load into rSP/rBP or use duplicate */ @@ -878,7 +854,6 @@ typedef union { g_assert(code_start + (size) == (unsigned char *)(inst)); \ } while (0) - /* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ #define x64_call_membase_size(inst,basereg,disp,size) \ do { \ @@ -912,6 +887,32 @@ typedef union { mono_x64_patch(jump_start, (target)); \ } while (0) +#else + +/* From the AMD64 Software Optimization Manual */ +#define x64_padding_size(inst,size) \ + do { \ + switch ((size)) { \ + case 1: *(inst)++ = 0x90; break; \ + case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \ + default: x64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \ + }; \ + } while (0) + +#define x64_call_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned 
char)0xff; x64_membase_emit ((inst),2, (basereg),(disp)); } while (0) +#define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0) + +#define x64_jump_code_size(inst,target,size) do { \ + if (x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))) { \ + x86_jump_code((inst),(target)); \ + } else { \ + x64_jump_membase ((inst), X64_RIP, 0); \ + *(uint64_t*)(inst) = (uint64_t)(target); \ + (inst) += 8; \ + } \ +} while (0) + #endif /*__native_client_codegen__*/ /* @@ -1547,18 +1548,19 @@ typedef union { #define x64_loopne_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); x64_codegen_post(inst); } while (0) #define x64_jump32_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); x64_codegen_post(inst); } while (0) #define x64_jump8_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); x64_codegen_post(inst); } while (0) + #if !defined( __native_client_codegen__ ) /* Defined above for Native Client, so they can be used in other macros */ #define x64_jump_reg_size(inst,reg,size) do { x64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0) #define x64_jump_mem_size(inst,mem,size) do { x64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0) #endif + #define x64_jump_disp_size(inst,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); x64_codegen_post(inst); } while (0) #define x64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0) #define x64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0) #define x64_branch_size_body(inst,cond,target,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); x64_codegen_post(inst); } while (0) -#if defined(__default_codegen__) -#define x64_branch_size(inst,cond,target,is_signed,size) do { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) -#elif defined(__native_client_codegen__) + +#if defined(__native_client_codegen__) #define x64_branch_size(inst,cond,target,is_signed,size) \ do { \ /* x64_branch_size_body used twice in */ \ @@ -1571,7 +1573,9 @@ typedef union { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \ mono_x64_patch(branch_start, (target)); \ } while (0) -#endif +#else +#define x64_branch_size(inst,cond,target,is_signed,size) do { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0) +#endif /* __native_client_codegen__ */ #define x64_branch_disp_size(inst,cond,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); x64_codegen_post(inst); } while (0) #define x64_set_reg_size(inst,cond,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0) @@ -1580,12 +1584,7 @@ typedef union { //#define x64_call_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0) #define x64_call_mem_size(inst,mem,size) do { x64_codegen_pre(inst); 
x64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); x64_codegen_post(inst); } while (0) -#if defined(__default_codegen__) - -#define x64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) -#define x64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) - -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) /* Size is ignored for Native Client calls, we restrict jumping to 32-bits */ #define x64_call_imm_size(inst,disp,size) \ do { \ @@ -1614,6 +1613,11 @@ typedef union { mono_x64_patch(call_start, (target)); \ } while (0) +#else + +#define x64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0) +#define x64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0) + #endif /*__native_client_codegen__*/ //#define x64_ret_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); x64_codegen_post(inst); } while (0) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index ad6282f..2c5b7e3 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -14,15 +14,16 @@ #ifndef X86_H #define X86_H + #include #ifdef __native_client_codegen__ extern gint8 nacl_align_byte; #endif /* __native_client_codegen__ */ - #if defined( __native_client_codegen__ ) && defined( TARGET_X86 ) #define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0) + #define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); #define x86_call_sequence_post_val(inst) \ (mono_nacl_align_call(&_code_start, &(inst)), _code_start); @@ -30,6 +31,7 @@ extern gint8 nacl_align_byte; #define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst)) #else #define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0) + /* Two variants are needed to avoid warnings */ #define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst); #define x86_call_sequence_post_val(inst) _code_start @@ -37,10 +39,7 @@ extern gint8 nacl_align_byte; #define x86_call_sequence_post(inst) #endif /* __native_client_codegen__ */ - -/* -// x86 register numbers -*/ +/* x86 32bit register numbers */ typedef enum { X86_EAX = 0, X86_ECX = 1, @@ -65,9 +64,7 @@ typedef enum { X86_XMM_NREG } X86_XMM_Reg_No; -/* -// opcodes for alu instructions -*/ +/* opcodes for ALU instructions */ typedef enum { X86_ADD = 0, X86_OR = 1, -- cgit v1.1 From 401348f4fd39a2ceee2c058091381697301193d2 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 8 Nov 2014 23:50:02 +0200 Subject: Replace register names with new definitions --- src/codegen.c | 38 +++++++-------- src/codegen_sse.h | 137 +++++++++++++++++++++--------------------------------- 2 files changed, 73 insertions(+), 102 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 4e70cb1..72ab6ef 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -156,9 +156,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* assign loop counter register */ loop_count = 4 * p->i0; #ifdef _M_X64 - MOV_I(&fp, EBX, loop_count); + MOV_I(&fp, X86_EBX, loop_count); #else - MOV_I(&fp, ECX, loop_count); + MOV_I(&fp, X86_ECX, loop_count); #endif #endif @@ -207,13 +207,13 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* generate function */ /* clear */ - XOR2(&fp, EAX, EAX); + XOR2(&fp, X86_EAX, X86_EAX); /* set "pointer" to offsets */ - MOV_D(&fp, RDI, RCX, 0, 0); + MOV_D(&fp, X64_RDI, X64_RCX, 0, 0); /* set "pointer" to constants */ 
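
/*
 * [editor's sketch] The MOV_D calls in this hunk only swap bare register
 * numbers for the named X64_* constants, but those numbers are exactly what
 * gets encoded: registers 8-15 need an extra REX bit, so a named constant
 * keeps the emitted bytes auditable. Below is a minimal stand-alone
 * illustration of the "mov r64, [base + disp8]" shape these helpers produce;
 * every name in it is the editor's assumption, not the project's API, and
 * base registers 4/12 (RSP/R12) are excluded because rm=100 would require a
 * SIB byte.
 */
#include <stdint.h>
#include <stdio.h>

static void emit_mov_reg_membase(uint8_t **p, unsigned dst, unsigned base, int8_t disp)
{
	*(*p)++ = 0x48 | (((dst >> 3) & 1) << 2) | ((base >> 3) & 1); /* REX.W + R/B bits */
	*(*p)++ = 0x8B;                                  /* mov r64, r/m64 */
	*(*p)++ = 0x40 | ((dst & 7) << 3) | (base & 7);  /* mod=01: reg, base */
	*(*p)++ = (uint8_t)disp;                         /* 8-bit displacement */
}

int main(void)
{
	uint8_t buf[4], *p = buf;
	int i;

	/* mov rdi, [rcx + 0x20], the shape used above to fetch the offset and
	 * constant pointers; expected bytes: 48 8b 79 20 */
	emit_mov_reg_membase(&p, 7 /* RDI */, 1 /* RCX */, 0x20);

	for (i = 0; i < (int)(p - buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
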
- MOV_D(&fp, RSI, RCX, 0xE0, 0); + MOV_D(&fp, X64_RSI, X64_RCX, 0xE0, 0); /* align loop/jump destination */ ffts_align_mem16(&fp, 8); @@ -245,10 +245,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, EBX, loop_count); + MOV_I(&fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 3); #else - MOV_I(&fp, ECX, loop_count); + MOV_I(&fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -298,10 +298,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, EBX, loop_count); + MOV_I(&fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 3); #else - MOV_I(&fp, ECX, loop_count); + MOV_I(&fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -325,10 +325,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, EBX, loop_count); + MOV_I(&fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 8); #else - MOV_I(&fp, ECX, loop_count); + MOV_I(&fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 9); #endif @@ -352,17 +352,17 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if (!pN) { #ifdef _M_X64 - MOV_I(&fp, EBX, pps[0]); + MOV_I(&fp, X86_EBX, pps[0]); #else - MOV_I(&fp, ECX, pps[0] / 4); + MOV_I(&fp, X86_ECX, pps[0] / 4); #endif } else { int offset = (4 * pps[1]) - pAddr; if (offset) { #ifdef _M_X64 - ADD_I(&fp, R8, offset); + ADD_I(&fp, X64_R8, offset); #else - ADD_I(&fp, RDX, offset); + ADD_I(&fp, X64_RDX, offset); #endif } @@ -370,9 +370,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN); #ifdef _M_X64 - SHIFT(&fp, EBX, factor); + SHIFT(&fp, X86_EBX, factor); #else - SHIFT(&fp, ECX, factor); + SHIFT(&fp, X86_ECX, factor); #endif } } @@ -382,9 +382,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - ADD_I(&fp, RDI, offset); + ADD_I(&fp, X64_RDI, offset); #else - ADD_I(&fp, R8, offset); + ADD_I(&fp, X64_R8, offset); #endif } diff --git a/src/codegen_sse.h b/src/codegen_sse.h index fa67a32..6b01773 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -34,6 +34,8 @@ #ifndef FFTS_CODEGEN_SSE_H #define FFTS_CODEGEN_SSE_H +#include "arch/x64/x64-codegen.h" + #include #include @@ -61,32 +63,6 @@ extern const uint32_t sse_leaf_oo_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; extern const uint32_t sse_leaf_oe_offsets[8]; -#define EAX 0 -#define ECX 1 -#define EDX 2 -#define EBX 3 -#define ESP 4 -#define EBP 5 -#define ESI 6 -#define EDI 7 - -#define RAX 0 -#define RCX 1 -#define RDX 2 -#define RBX 3 -#define RSP 4 -#define RBP 5 -#define RSI 6 -#define RDI 7 -#define R8 8 -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 - #define XMM_REG 0x40 #define XMM0 (XMM_REG | 0x0) @@ -122,7 +98,7 @@ static FFTS_INLINE void ADDPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); } - /* esacape opcode */ + /* escape opcode */ *(*p)++ = 0x0F; /* opcode */ @@ -515,11 +491,6 @@ static int32_t READ_IMM32(uint8_t *p) return rval; } -static void RET(uint8_t **p) -{ - *(*p)++ = 0xc3; -} - static void SHIFT(uint8_t **p, uint8_t reg, int shift) { if (reg >= 8) { @@ -720,36 +691,36 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) { 
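
/*
 * [editor's note] The epilogue in this hunk undoes a Win64 frame whose size
 * looks arbitrary but is forced by alignment: a call leaves RSP = 8 (mod 16)
 * at entry, RBX/RSI/RDI go into the caller-provided 32-byte shadow space at
 * [rsp + 8/16/24], and MOVDQA needs 16-byte-aligned slots for the ten
 * nonvolatile registers XMM6-XMM15, so the frame is 10 * 16 + 8 = 168 bytes.
 * A self-checking sketch of that arithmetic; the entry alignment is the
 * editor's assumption, taken from the Win64 calling convention:
 */
#include <assert.h>

int main(void)
{
	int rsp_mod16 = 8;          /* RSP mod 16 right after "call" */
	int xmm_saves = 10 * 16;    /* XMM6..XMM15, one 16-byte slot each */
	int frame = xmm_saves + 8;  /* the 168 in SUB/ADD RSP below */
	int slot;

	assert(frame == 168);

	/* sub rsp, 168 re-aligns the stack: 8 - 168 = 0 (mod 16), so every
	 * MOVDQA slot at [rsp + slot*16] is 16-byte aligned in the body */
	assert((rsp_mod16 + frame) % 16 == 0);

	/* the highest save, [rsp + 144], still fits inside the frame */
	for (slot = 0; slot < 10; slot++)
		assert(slot * 16 + 16 <= frame);
	return 0;
}
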
#ifdef _M_X64 /* restore nonvolatile registers */ - MOVDQA3(fp, XMM6, RSP, 0); - MOVDQA3(fp, XMM7, RSP, 16); - MOVDQA3(fp, XMM8, RSP, 32); - MOVDQA3(fp, XMM9, RSP, 48); - MOVDQA3(fp, XMM10, RSP, 64); - MOVDQA3(fp, XMM11, RSP, 80); - MOVDQA3(fp, XMM12, RSP, 96); - MOVDQA3(fp, XMM13, RSP, 112); - MOVDQA3(fp, XMM14, RSP, 128); - MOVDQA3(fp, XMM15, RSP, 144); + MOVDQA3(fp, XMM6, X64_RSP, 0); + MOVDQA3(fp, XMM7, X64_RSP, 16); + MOVDQA3(fp, XMM8, X64_RSP, 32); + MOVDQA3(fp, XMM9, X64_RSP, 48); + MOVDQA3(fp, XMM10, X64_RSP, 64); + MOVDQA3(fp, XMM11, X64_RSP, 80); + MOVDQA3(fp, XMM12, X64_RSP, 96); + MOVDQA3(fp, XMM13, X64_RSP, 112); + MOVDQA3(fp, XMM14, X64_RSP, 128); + MOVDQA3(fp, XMM15, X64_RSP, 144); /* restore stack */ - ADD_I(fp, RSP, 168); + ADD_I(fp, X64_RSP, 168); /* restore the last 3 registers from the shadow space */ - MOV_D(fp, RBX, RSP, 8, 0); - MOV_D(fp, RSI, RSP, 16, 0); - MOV_D(fp, RDI, RSP, 24, 0); + MOV_D(fp, X64_RBX, X64_RSP, 8, 0); + MOV_D(fp, X64_RSI, X64_RSP, 16, 0); + MOV_D(fp, X64_RDI, X64_RSP, 24, 0); #else - POP(fp, R15); - POP(fp, R14); - POP(fp, R13); - POP(fp, R12); - POP(fp, R11); - POP(fp, R10); - POP(fp, RBX); - POP(fp, RBP); + POP(fp, X64_R15); + POP(fp, X64_R14); + POP(fp, X64_R13); + POP(fp, X64_R12); + POP(fp, X64_R11); + POP(fp, X64_R10); + POP(fp, X64_RBX); + POP(fp, X64_RBP); #endif - RET(fp); + x64_ret(*fp); } static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) @@ -763,33 +734,33 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) /* save nonvolatile registers */ #ifdef _M_X64 /* use the shadow space to save first 3 registers */ - MOV_D(fp, RBX, RSP, 8, 1); - MOV_D(fp, RSI, RSP, 16, 1); - MOV_D(fp, RDI, RSP, 24, 1); + MOV_D(fp, X64_RBX, X64_RSP, 8, 1); + MOV_D(fp, X64_RSI, X64_RSP, 16, 1); + MOV_D(fp, X64_RDI, X64_RSP, 24, 1); /* reserve space.. 
*/ - SUB_I(fp, RSP, 168); + SUB_I(fp, X64_RSP, 168); /* to save XMM6-XMM15 registers */ - MOVDQA3(fp, RSP, 0, XMM6); - MOVDQA3(fp, RSP, 16, XMM7); - MOVDQA3(fp, RSP, 32, XMM8); - MOVDQA3(fp, RSP, 48, XMM9); - MOVDQA3(fp, RSP, 64, XMM10); - MOVDQA3(fp, RSP, 80, XMM11); - MOVDQA3(fp, RSP, 96, XMM12); - MOVDQA3(fp, RSP, 112, XMM13); - MOVDQA3(fp, RSP, 128, XMM14); - MOVDQA3(fp, RSP, 144, XMM15); + MOVDQA3(fp, X64_RSP, 0, XMM6); + MOVDQA3(fp, X64_RSP, 16, XMM7); + MOVDQA3(fp, X64_RSP, 32, XMM8); + MOVDQA3(fp, X64_RSP, 48, XMM9); + MOVDQA3(fp, X64_RSP, 64, XMM10); + MOVDQA3(fp, X64_RSP, 80, XMM11); + MOVDQA3(fp, X64_RSP, 96, XMM12); + MOVDQA3(fp, X64_RSP, 112, XMM13); + MOVDQA3(fp, X64_RSP, 128, XMM14); + MOVDQA3(fp, X64_RSP, 144, XMM15); #else - PUSH(fp, RBP); - PUSH(fp, RBX); - PUSH(fp, R10); - PUSH(fp, R11); - PUSH(fp, R12); - PUSH(fp, R13); - PUSH(fp, R14); - PUSH(fp, R15); + PUSH(fp, X64_RBP); + PUSH(fp, X64_RBX); + PUSH(fp, X64_R10); + PUSH(fp, X64_R11); + PUSH(fp, X64_R12); + PUSH(fp, X64_R13); + PUSH(fp, X64_R14); + PUSH(fp, X64_R15); #endif return start; @@ -799,10 +770,10 @@ static FFTS_INLINE void generate_transform_init(insns_t **fp) { #ifdef _M_X64 /* generate function */ - MOVAPS2(fp, XMM3, RSI); + MOVAPS2(fp, XMM3, X64_RSI); /* set "pointer" to twiddle factors */ - MOV_D(fp, RDI, RCX, 0x20, 0); + MOV_D(fp, X64_RDI, X64_RCX, 0x20, 0); #else size_t len; @@ -854,10 +825,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) #ifdef _M_X64 /* input */ - MOV_R(fp, RDI, RAX, 1); + MOV_R(fp, X64_RDI, X64_RAX, 1); /* output */ - MOV_R(fp, R8, RCX, 1); + MOV_R(fp, X64_R8, X64_RCX, 1); /* lea rdx, [r8 + rbx] */ /* loop stop (output + output_stride) */ @@ -1053,7 +1024,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x50; /* input + 6 * input_stride */ - ADD_I(fp, RAX, 0x60); + ADD_I(fp, X64_RAX, 0x60); MULPS(fp, XMM13, XMM7); SUBPS(fp, XMM6, XMM15); @@ -1201,7 +1172,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0xFF; /* ret */ - RET(fp); + x64_ret(*fp); #else /* copy function */ assert((char*) x8_soft_end >= (char*) x8_soft); -- cgit v1.1 From ec158717d8a46def60917145b54b656d7a541eb2 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 00:11:07 +0200 Subject: Replace add/sub immediate value with x64_alu_reg_imm_size_body --- src/codegen.c | 8 ++++---- src/codegen_sse.h | 53 +++-------------------------------------------------- 2 files changed, 7 insertions(+), 54 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 72ab6ef..7814b04 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -360,9 +360,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (4 * pps[1]) - pAddr; if (offset) { #ifdef _M_X64 - ADD_I(&fp, X64_R8, offset); + x64_alu_reg_imm_size_body(fp, X86_ADD, X64_R8, offset, 8); #else - ADD_I(&fp, X64_RDX, offset); + x64_alu_reg_imm_size_body(fp, X86_ADD, X64_RDX, offset, 8); #endif } @@ -382,9 +382,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - ADD_I(&fp, X64_RDI, offset); + x64_alu_reg_imm_size_body(fp, X86_ADD, X64_RDI, offset, 8); #else - ADD_I(&fp, X64_R8, offset); + x64_alu_reg_imm_size_body(fp, X86_ADD, X64_R8, offset, 8); #endif } diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 6b01773..3c3a6ef 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -106,30 +106,6 @@ static FFTS_INLINE void 
ADDPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -/* Immediate */ -static void ADD_I(uint8_t **p, uint8_t dst, int32_t imm) -{ - if (dst >= 8) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } - - if (imm > 127 || imm <= -128) { - *(*p)++ = 0x81; - } else { - *(*p)++ = 0x83; - } - - *(*p)++ = 0xc0 | (dst & 0x7); - - if (imm > 127 || imm <= -128) { - IMM32(p, imm); - } else { - IMM8(p, imm); - } -} - static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) { if (disp == 0) { @@ -549,29 +525,6 @@ static FFTS_INLINE void SUBPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -static void SUB_I(uint8_t **p, uint8_t dst, int32_t imm) -{ - if (dst >= 8) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } - - if (imm > 127 || imm <= -128) { - *(*p)++ = 0x81; - } else { - *(*p)++ = 0x83; - } - - *(*p)++ = 0xe8 | (dst & 0x7); - - if (imm > 127 || imm <= -128) { - IMM32(p, imm); - } else { - IMM8(p, imm); - } -} - static FFTS_INLINE void XOR2(uint8_t **p, uint8_t reg1, uint8_t reg2) { uint8_t r1 = (reg1 & 7); @@ -703,7 +656,7 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) MOVDQA3(fp, XMM15, X64_RSP, 144); /* restore stack */ - ADD_I(fp, X64_RSP, 168); + x64_alu_reg_imm_size_body(*fp, X86_ADD, X64_RSP, 168, 8); /* restore the last 3 registers from the shadow space */ MOV_D(fp, X64_RBX, X64_RSP, 8, 0); @@ -739,7 +692,7 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) MOV_D(fp, X64_RDI, X64_RSP, 24, 1); /* reserve space.. */ - SUB_I(fp, X64_RSP, 168); + x64_alu_reg_imm_size_body(*fp, X86_SUB, X64_RSP, 168, 8); /* to save XMM6-XMM15 registers */ MOVDQA3(fp, X64_RSP, 0, XMM6); @@ -1024,7 +977,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x50; /* input + 6 * input_stride */ - ADD_I(fp, X64_RAX, 0x60); + x64_alu_reg_imm_size_body(*fp, X86_ADD, X64_RAX, 0x60, 8); MULPS(fp, XMM13, XMM7); SUBPS(fp, XMM6, XMM15); -- cgit v1.1 From 0a98074a2bbde2a3f190e9f32cfeebba594cbbf0 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 01:03:08 +0200 Subject: Replace MOV_I with x86_mov_reg_imm, SHIFT with x86_shift_reg_imm, CALL with x64_call_imm, POP with x64_pop_reg, PUSH with x64_push_reg --- src/codegen.c | 36 +++++++++------- src/codegen_sse.h | 123 ++++++++---------------------------------------------- 2 files changed, 39 insertions(+), 120 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 7814b04..d08be0d 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -156,9 +156,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* assign loop counter register */ loop_count = 4 * p->i0; #ifdef _M_X64 - MOV_I(&fp, X86_EBX, loop_count); + x86_mov_reg_imm(fp, X86_EBX, loop_count); #else - MOV_I(&fp, X86_ECX, loop_count); + x86_mov_reg_imm(fp, X86_ECX, loop_count); #endif #endif @@ -245,10 +245,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, X86_EBX, loop_count); + x86_mov_reg_imm(fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 3); #else - MOV_I(&fp, X86_ECX, loop_count); + x86_mov_reg_imm(fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -298,10 +298,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, X86_EBX, loop_count); + x86_mov_reg_imm(fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 3); #else - 
MOV_I(&fp, X86_ECX, loop_count); + x86_mov_reg_imm(fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 4); #endif @@ -325,10 +325,10 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - MOV_I(&fp, X86_EBX, loop_count); + x86_mov_reg_imm(fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 8); #else - MOV_I(&fp, X86_ECX, loop_count); + x86_mov_reg_imm(fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 9); #endif @@ -352,9 +352,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if (!pN) { #ifdef _M_X64 - MOV_I(&fp, X86_EBX, pps[0]); + x86_mov_reg_imm(fp, X86_EBX, pps[0]); #else - MOV_I(&fp, X86_ECX, pps[0] / 4); + x86_mov_reg_imm(fp, X86_ECX, pps[0] / 4); #endif } else { int offset = (4 * pps[1]) - pAddr; @@ -370,9 +370,17 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN); #ifdef _M_X64 - SHIFT(&fp, X86_EBX, factor); + if (factor > 0) { + x86_shift_reg_imm(fp, X86_SHL, X86_EBX, factor); + } else { + x86_shift_reg_imm(fp, X86_SHR, X86_EBX, -factor); + } #else - SHIFT(&fp, X86_ECX, factor); + if (factor > 0) { + x86_shift_reg_imm(fp, X86_SHL, X86_ECX, factor); + } else { + x86_shift_reg_imm(fp, X86_SHR, X86_ECX, -factor); + } #endif } } @@ -389,9 +397,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N } if (pps[0] == 2 * leaf_N) { - CALL(&fp, x_4_addr); + x64_call_imm(fp, (char*) x_4_addr - ((char*) fp + 4)); } else { - CALL(&fp, x_8_addr); + x64_call_imm(fp, (char*) x_8_addr - ((char*) fp + 4)); } pAddr = 4 * pps[1]; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 3c3a6ef..c7351fc 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -119,26 +119,11 @@ static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) } } -static void CALL(uint8_t **p, uint8_t *func) -{ - *(*p)++ = 0xe8; - IMM32(p, func - *p - 4); -} - static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); } -static void IMM16(uint8_t **p, int32_t imm) -{ - int i; - - for (i = 0; i < 2; i++) { - *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); - } -} - static void IMM32(uint8_t **p, int32_t imm) { int i; @@ -368,33 +353,6 @@ static FFTS_INLINE void MOV_D(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t d } } -static void MOV_I(uint8_t **p, uint8_t dst, uint64_t imm) -{ - /* REX prefix */ - if (dst >= 8 || imm > UINT32_MAX) { - uint8_t val = 0x40; - - if (dst >= 8) { - val |= 1; - } - - if (imm > UINT32_MAX) { - val |= 8; - } - - *(*p)++ = val; - } - - /* opcode */ - *(*p)++ = 0xb8 | (dst & 0x7); - - if (imm > UINT32_MAX) { - IMM64(p, imm); - } else { - IMM32(p, imm); - } -} - static FFTS_INLINE void MOV_R(uint8_t **p, uint8_t reg1, uint8_t reg2, int is_store) { uint8_t r1 = (reg1 & 7); @@ -437,53 +395,6 @@ static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -static void POP(uint8_t **p, uint8_t reg) -{ - if (reg >= 8) { - *(*p)++ = 0x41; - } - - *(*p)++ = 0x58 | (reg & 7); -} - -static void PUSH(uint8_t **p, uint8_t reg) -{ - if (reg >= 8) { - *(*p)++ = 0x41; - } - - *(*p)++ = 0x50 | (reg & 7); -} - -static int32_t READ_IMM32(uint8_t *p) -{ - int32_t rval = 0; - int i; - - for (i = 0; i < 4; i++) { - rval |= *(p+i) << (8 * i); - } - - return rval; -} - -static void SHIFT(uint8_t **p, uint8_t reg, int shift) -{ - if (reg >= 8) { - *(*p)++ = 0x49; - } - - - *(*p)++ = 0xc1; - if (shift > 0) { - *(*p)++ = 0xe0 | (reg & 7); - 
*(*p)++ = (shift & 0xff); - } else { - *(*p)++ = 0xe8 | (reg & 7); - *(*p)++ = ((-shift) & 0xff); - } -} - static FFTS_INLINE void SHUFPS(uint8_t **p, uint8_t reg2, uint8_t reg1, const int select) { uint8_t r1 = (reg1 & 7); @@ -662,15 +573,15 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) MOV_D(fp, X64_RBX, X64_RSP, 8, 0); MOV_D(fp, X64_RSI, X64_RSP, 16, 0); MOV_D(fp, X64_RDI, X64_RSP, 24, 0); -#else - POP(fp, X64_R15); - POP(fp, X64_R14); - POP(fp, X64_R13); - POP(fp, X64_R12); - POP(fp, X64_R11); - POP(fp, X64_R10); - POP(fp, X64_RBX); - POP(fp, X64_RBP); +#else + x64_pop_reg(*fp, X64_R15); + x64_pop_reg(*fp, X64_R14); + x64_pop_reg(*fp, X64_R13); + x64_pop_reg(*fp, X64_R12); + x64_pop_reg(*fp, X64_R11); + x64_pop_reg(*fp, X64_R10); + x64_pop_reg(*fp, X64_RBX); + x64_pop_reg(*fp, X64_RBP); #endif x64_ret(*fp); @@ -706,14 +617,14 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) MOVDQA3(fp, X64_RSP, 128, XMM14); MOVDQA3(fp, X64_RSP, 144, XMM15); #else - PUSH(fp, X64_RBP); - PUSH(fp, X64_RBX); - PUSH(fp, X64_R10); - PUSH(fp, X64_R11); - PUSH(fp, X64_R12); - PUSH(fp, X64_R13); - PUSH(fp, X64_R14); - PUSH(fp, X64_R15); + x64_push_reg(*fp, X64_RBP); + x64_push_reg(*fp, X64_RBX); + x64_push_reg(*fp, X64_R10); + x64_push_reg(*fp, X64_R11); + x64_push_reg(*fp, X64_R12); + x64_push_reg(*fp, X64_R13); + x64_push_reg(*fp, X64_R14); + x64_push_reg(*fp, X64_R15); #endif return start; -- cgit v1.1 From c82441c33c3527d1b13f7779c52d58e477f36a93 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 01:42:51 +0200 Subject: Replace XOR2 with x86_clear_reg, MOV_D with x64_mov_membase_reg/x86_mov_reg_membase, MOV_R with x64_mov_reg_reg and x64_alu_reg_imm_size_body with x64_alu_reg_imm_size --- src/codegen.c | 14 +++--- src/codegen_sse.h | 124 ++++++------------------------------------------------ 2 files changed, 19 insertions(+), 119 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index d08be0d..92f7553 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -207,13 +207,13 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* generate function */ /* clear */ - XOR2(&fp, X86_EAX, X86_EAX); + x86_clear_reg(fp, X86_EAX); /* set "pointer" to offsets */ - MOV_D(&fp, X64_RDI, X64_RCX, 0, 0); + x64_mov_reg_membase(fp, X64_RDI, X64_RCX, 0x0, 8); /* set "pointer" to constants */ - MOV_D(&fp, X64_RSI, X64_RCX, 0xE0, 0); + x64_mov_reg_membase(fp, X64_RSI, X64_RCX, 0xE0, 8); /* align loop/jump destination */ ffts_align_mem16(&fp, 8); @@ -360,9 +360,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (4 * pps[1]) - pAddr; if (offset) { #ifdef _M_X64 - x64_alu_reg_imm_size_body(fp, X86_ADD, X64_R8, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); #else - x64_alu_reg_imm_size_body(fp, X86_ADD, X64_RDX, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_RDX, offset, 8); #endif } @@ -390,9 +390,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - x64_alu_reg_imm_size_body(fp, X86_ADD, X64_RDI, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_RDI, offset, 8); #else - x64_alu_reg_imm_size_body(fp, X86_ADD, X64_R8, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); #endif } diff --git a/src/codegen_sse.h b/src/codegen_sse.h index c7351fc..f30933e 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -106,19 +106,6 @@ static FFTS_INLINE void ADDPS(uint8_t **p, uint8_t 
reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -static void ADDRMODE(uint8_t **p, uint8_t reg, uint8_t rm, int32_t disp) -{ - if (disp == 0) { - *(*p)++ = (rm & 7) | ((reg & 7) << 3); - } else if (disp <= 127 || disp >= -128) { - *(*p)++ = 0x40 | (rm & 7) | ((reg & 7) << 3); - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | (rm & 7) | ((reg & 7) << 3); - IMM32(p, disp); - } -} - static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); @@ -151,13 +138,6 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static void LEA(uint8_t **p, uint8_t dst, uint8_t base, int32_t disp) -{ - *(*p)++ = 0x48 | ((base & 0x8) >> 3) | ((dst & 0x8) >> 1); - *(*p)++ = 0x8d; - ADDRMODE(p, dst, base, disp); -} - static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { uint8_t r1 = (reg1 & 7); @@ -311,72 +291,6 @@ static FFTS_INLINE void MOVDQA3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t } } -static FFTS_INLINE void MOV_D(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x49; - } else { - *(*p)++ = 0x48; - } - - if (is_store) { - *(*p)++ = 0x89; - } else { - *(*p)++ = 0x8B; - } - - if (disp == 0) { - *(*p)++ = r2 | (r1 << 3); - - if (r2 == 4) { - *(*p)++ = 0x24; - } - } else if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r2 | (r1 << 3); - - if (r2 == 4) { - *(*p)++ = 0x24; - } - - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r2 | (r1 << 3) | (r1 << 11); - - if (r2 == 4) { - *(*p)++ = 0x24; - } - - IMM32(p, disp); - } -} - -static FFTS_INLINE void MOV_R(uint8_t **p, uint8_t reg1, uint8_t reg2, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x48 | ((reg2 & 8) >> 3) | ((reg1 & 8) >> 1); - } else { - *(*p)++ = 0x48; - } - - if (is_store) { - *(*p)++ = 0x89; - } else { - *(*p)++ = 0x8B; - } - - *(*p)++ = 0xC0 | r2 | (r1 << 3); - - if (r2 == 4) { - *(*p)++ = 0x24; - } -} - static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg2, uint8_t reg1) { uint8_t r1 = (reg1 & 7); @@ -436,20 +350,6 @@ static FFTS_INLINE void SUBPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -static FFTS_INLINE void XOR2(uint8_t **p, uint8_t reg1, uint8_t reg2) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - *(*p)++ = 0x31; - *(*p)++ = 0xC0 | r2 | (r1 << 3); -} - static FFTS_INLINE void XORPS(uint8_t **p, uint8_t reg2, uint8_t reg1) { uint8_t r1 = (reg1 & 7); @@ -567,12 +467,12 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) MOVDQA3(fp, XMM15, X64_RSP, 144); /* restore stack */ - x64_alu_reg_imm_size_body(*fp, X86_ADD, X64_RSP, 168, 8); + x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 168, 8); /* restore the last 3 registers from the shadow space */ - MOV_D(fp, X64_RBX, X64_RSP, 8, 0); - MOV_D(fp, X64_RSI, X64_RSP, 16, 0); - MOV_D(fp, X64_RDI, X64_RSP, 24, 0); + x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, 8, 8); + x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, 16, 8); + x64_mov_reg_membase(*fp, X64_RDI, X64_RSP, 24, 8); #else x64_pop_reg(*fp, X64_R15); x64_pop_reg(*fp, X64_R14); @@ -598,12 +498,12 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) /* save nonvolatile registers */ #ifdef _M_X64 /* use the shadow space to save first 3 registers */ - MOV_D(fp, X64_RBX, X64_RSP, 8, 1); - 
MOV_D(fp, X64_RSI, X64_RSP, 16, 1); - MOV_D(fp, X64_RDI, X64_RSP, 24, 1); + x64_mov_membase_reg(*fp, X64_RSP, 8, X64_RBX, 8); + x64_mov_membase_reg(*fp, X64_RSP, 16, X64_RSI, 8); + x64_mov_membase_reg(*fp, X64_RSP, 24, X64_RDI, 8); /* reserve space.. */ - x64_alu_reg_imm_size_body(*fp, X86_SUB, X64_RSP, 168, 8); + x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 168, 8); /* to save XMM6-XMM15 registers */ MOVDQA3(fp, X64_RSP, 0, XMM6); @@ -637,7 +537,7 @@ static FFTS_INLINE void generate_transform_init(insns_t **fp) MOVAPS2(fp, XMM3, X64_RSI); /* set "pointer" to twiddle factors */ - MOV_D(fp, X64_RDI, X64_RCX, 0x20, 0); + x64_mov_reg_membase(*fp, X64_RDI, X64_RCX, 0x20, 8); #else size_t len; @@ -689,10 +589,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) #ifdef _M_X64 /* input */ - MOV_R(fp, X64_RDI, X64_RAX, 1); + x64_mov_reg_reg(*fp, X64_RAX, X64_RDI, 8); /* output */ - MOV_R(fp, X64_R8, X64_RCX, 1); + x64_mov_reg_reg(*fp, X64_RCX, X64_R8, 8); /* lea rdx, [r8 + rbx] */ /* loop stop (output + output_stride) */ @@ -888,7 +788,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x50; /* input + 6 * input_stride */ - x64_alu_reg_imm_size_body(*fp, X86_ADD, X64_RAX, 0x60, 8); + x64_alu_reg_imm_size(*fp, X86_ADD, X64_RAX, 0x60, 8); MULPS(fp, XMM13, XMM7); SUBPS(fp, XMM6, XMM15); -- cgit v1.1 From 757b6eb0e7a5742ffd23b721f2fe2e041ffd1069 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 01:44:32 +0200 Subject: Again default to "__default_codegen__" --- x86/x86-codegen.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index 2c5b7e3..fec8ccb 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -490,12 +490,7 @@ typedef union { #define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0) #define x86_movsd(inst) x86_movsl((inst)) -#if defined(__default_codegen__) -#define x86_prefix(inst,p) \ - do { \ - *(inst)++ =(unsigned char) (p); \ - } while (0) -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) #if defined(TARGET_X86) /* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. 
*/ /* This keeps us from having to call x86_codegen_pre with specific */ @@ -517,6 +512,11 @@ typedef union { #endif /* TARGET_AMD64 */ +#else +#define x86_prefix(inst,p) \ + do { \ + *(inst)++ =(unsigned char) (p); \ + } while (0) #endif /* __native_client_codegen__ */ #define x86_rdtsc(inst) \ -- cgit v1.1 From ad38065bfe4371c489ce9a8afe1c7c3ff42083c1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 11:22:16 +0200 Subject: Replace XORPS with x64_sse_xorps_reg_reg --- src/codegen_sse.h | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index f30933e..da1d1ed 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -350,24 +350,6 @@ static FFTS_INLINE void SUBPS(uint8_t **p, uint8_t reg2, uint8_t reg1) *(*p)++ = 0xC0 | r1 | (r2 << 3); } -static FFTS_INLINE void XORPS(uint8_t **p, uint8_t reg2, uint8_t reg1) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0x57; - *(*p)++ = 0xC0 | r1 | (r2 << 3); -} - static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) { if (count >= 9) { @@ -720,7 +702,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x2C; *(*fp)++ = 0x71; - XORPS(fp, XMM11, XMM3); + x64_sse_xorps_reg_reg(*fp, X64_XMM11, X64_XMM3); /* movaps xmm14, [rax + 0x30] */ /* input + 3 * input_stride */ @@ -797,7 +779,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SUBPS(fp, XMM0, XMM12); ADDPS(fp, XMM5, XMM12); SHUFPS(fp, XMM7, XMM7, 0xB1); - XORPS(fp, XMM6, XMM3); + x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); + SHUFPS(fp, XMM8, XMM8, 0xB1); /* movaps xmm12, xmm2 */ @@ -850,7 +833,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SUBPS(fp, XMM13, XMM10); ADDPS(fp, XMM11, XMM10); - XORPS(fp, XMM13, XMM3); + x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); ADDPS(fp, XMM4, XMM11); SUBPS(fp, XMM14, XMM11); SHUFPS(fp, XMM13, XMM13, 0xB1); -- cgit v1.1 From 60a12c6b54641415eaf3a1590ecb87804ae7c7b9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 11:30:15 +0200 Subject: Replace ADDPS with x64_sse_addps_reg_reg --- src/codegen_sse.h | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index da1d1ed..ed81d1e 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -88,24 +88,6 @@ extern const uint32_t sse_leaf_oe_offsets[8]; static void IMM8(uint8_t **p, int32_t imm); static void IMM32(uint8_t **p, int32_t imm); -static FFTS_INLINE void ADDPS(uint8_t **p, uint8_t reg2, uint8_t reg1) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* escape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0x58; - *(*p)++ = 0xC0 | r1 | (r2 << 3); -} - static void IMM8(uint8_t **p, int32_t imm) { *(*p)++ = (imm & 0xff); @@ -656,7 +638,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x28; *(*fp)++ = 0xD3; - ADDPS(fp, XMM9, XMM8); + x64_sse_addps_reg_reg(*fp, X64_XMM9, X64_XMM8); /* movaps xmm15, [rax + 0x20] */ /* input + 2 * input_stride */ @@ -666,7 +648,7 @@ static FFTS_INLINE insns_t* 
generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x78; *(*fp)++ = 0x20; - ADDPS(fp, XMM10, XMM9); + x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); SUBPS(fp, XMM11, XMM9); /* movaps xmm5, [rcx] */ @@ -714,7 +696,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SUBPS(fp, XMM2, XMM10); MULPS(fp, XMM6, XMM12); - ADDPS(fp, XMM5, XMM10); + x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM10); + MULPS(fp, XMM15, XMM13); /* movaps xmm10, [rax + 0x40] */ @@ -734,7 +717,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) MULPS(fp, XMM12, XMM14); MULPS(fp, XMM14, XMM13); SUBPS(fp, XMM6, XMM12); - ADDPS(fp, XMM15, XMM14); + x64_sse_addps_reg_reg(*fp, X64_XMM15, X64_XMM14); /* movaps xmm7, [rcx + r10] */ *(*fp)++ = 0x42; @@ -774,10 +757,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) MULPS(fp, XMM13, XMM7); SUBPS(fp, XMM6, XMM15); - ADDPS(fp, XMM12, XMM15); + x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM15); MULPS(fp, XMM10, XMM8); SUBPS(fp, XMM0, XMM12); - ADDPS(fp, XMM5, XMM12); + x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM12); SHUFPS(fp, XMM7, XMM7, 0xB1); x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); @@ -792,7 +775,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) MULPS(fp, XMM7, XMM9); MULPS(fp, XMM9, XMM8); SUBPS(fp, XMM13, XMM7); - ADDPS(fp, XMM10, XMM9); + x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); /* movaps xmm4, [rcx + rbx] */ /* output + 1 * output_stride */ @@ -809,9 +792,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0xCC; SHUFPS(fp, XMM6, XMM6, 0xB1); - ADDPS(fp, XMM1, XMM11); + x64_sse_addps_reg_reg(*fp, X64_XMM1, X64_XMM11); SUBPS(fp, XMM4, XMM11); - ADDPS(fp, XMM12, XMM6); + x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM6); SUBPS(fp, XMM2, XMM6); /* movaps xmm11, xmm13 */ @@ -832,9 +815,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0xF1; SUBPS(fp, XMM13, XMM10); - ADDPS(fp, XMM11, XMM10); + x64_sse_addps_reg_reg(*fp, X64_XMM11, X64_XMM10); x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); - ADDPS(fp, XMM4, XMM11); + x64_sse_addps_reg_reg(*fp, X64_XMM4, X64_XMM11); SUBPS(fp, XMM14, XMM11); SHUFPS(fp, XMM13, XMM13, 0xB1); @@ -859,7 +842,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x59; SUBPS(fp, XMM1, XMM13); - ADDPS(fp, XMM6, XMM13); + x64_sse_addps_reg_reg(*fp, X64_XMM6, X64_XMM13); /* movaps [rcx + rsi], xmm1 */ /* output + 3 * output_stride */ -- cgit v1.1 From dfc676c824a4f07f0243f90be60168336a2a33b8 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 11:40:32 +0200 Subject: Replace SUBPS with x64_sse_subps_reg_reg --- src/codegen_sse.h | 42 ++++++++++++------------------------------ 1 file changed, 12 insertions(+), 30 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index ed81d1e..fb4fbfc 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -314,24 +314,6 @@ static FFTS_INLINE void SHUFPS(uint8_t **p, uint8_t reg2, uint8_t reg1, const in *(*p)++ = (select & 0xFF); } -static FFTS_INLINE void SUBPS(uint8_t **p, uint8_t reg2, uint8_t reg1) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0x5C; - *(*p)++ = 0xC0 | r1 | (r2 << 3); -} - static FFTS_INLINE void 
ffts_insert_nops(uint8_t **p, uint32_t count) { if (count >= 9) { @@ -629,7 +611,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SHUFPS(fp, XMM6, XMM6, 0xB1); MULPS(fp, XMM6, XMM8); SHUFPS(fp, XMM7, XMM7, 0xB1); - SUBPS(fp, XMM11, XMM6); + x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM6); MULPS(fp, XMM8, XMM7); /* movaps xmm10, xmm11 */ @@ -649,7 +631,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x20; x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); - SUBPS(fp, XMM11, XMM9); + x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM9); /* movaps xmm5, [rcx] */ /* output + 0 * output_stride */ @@ -694,7 +676,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x70; *(*fp)++ = 0x30; - SUBPS(fp, XMM2, XMM10); + x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM10); MULPS(fp, XMM6, XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM10); @@ -716,7 +698,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SHUFPS(fp, XMM13, XMM13, 0xB1); MULPS(fp, XMM12, XMM14); MULPS(fp, XMM14, XMM13); - SUBPS(fp, XMM6, XMM12); + x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM15, X64_XMM14); /* movaps xmm7, [rcx + r10] */ @@ -756,10 +738,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_alu_reg_imm_size(*fp, X86_ADD, X64_RAX, 0x60, 8); MULPS(fp, XMM13, XMM7); - SUBPS(fp, XMM6, XMM15); + x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM15); x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM15); MULPS(fp, XMM10, XMM8); - SUBPS(fp, XMM0, XMM12); + x64_sse_subps_reg_reg(*fp, X64_XMM0, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM12); SHUFPS(fp, XMM7, XMM7, 0xB1); x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); @@ -774,7 +756,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) MULPS(fp, XMM7, XMM9); MULPS(fp, XMM9, XMM8); - SUBPS(fp, XMM13, XMM7); + x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM7); x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); /* movaps xmm4, [rcx + rbx] */ @@ -793,9 +775,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SHUFPS(fp, XMM6, XMM6, 0xB1); x64_sse_addps_reg_reg(*fp, X64_XMM1, X64_XMM11); - SUBPS(fp, XMM4, XMM11); + x64_sse_subps_reg_reg(*fp, X64_XMM4, X64_XMM11); x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM6); - SUBPS(fp, XMM2, XMM6); + x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM6); /* movaps xmm11, xmm13 */ *(*fp)++ = 0x45; @@ -814,11 +796,11 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x28; *(*fp)++ = 0xF1; - SUBPS(fp, XMM13, XMM10); + x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM10); x64_sse_addps_reg_reg(*fp, X64_XMM11, X64_XMM10); x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); x64_sse_addps_reg_reg(*fp, X64_XMM4, X64_XMM11); - SUBPS(fp, XMM14, XMM11); + x64_sse_subps_reg_reg(*fp, X64_XMM14, X64_XMM11); SHUFPS(fp, XMM13, XMM13, 0xB1); /* movaps [rcx], xmm5 */ @@ -841,7 +823,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x14; *(*fp)++ = 0x59; - SUBPS(fp, XMM1, XMM13); + x64_sse_subps_reg_reg(*fp, X64_XMM1, X64_XMM13); x64_sse_addps_reg_reg(*fp, X64_XMM6, X64_XMM13); /* movaps [rcx + rsi], xmm1 */ -- cgit v1.1 From d9778032d254280fa6c19ce3198fe5c10b7cf0dd Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 11:53:02 +0200 Subject: Replace MOVAPS with x64_sse_movaps_reg_membase --- src/codegen_sse.h | 77 
+------------------------------------------------------ 1 file changed, 1 insertion(+), 76 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index fb4fbfc..4d325bd 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -120,81 +120,6 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static FFTS_INLINE void MOVAPS(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - if (is_store) { - *(*p)++ = 0x29; - } else { - *(*p)++ = 0x28; - } - - r = r1 | (r2 << 3); - - if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { - assert(disp == 0); - *(*p)++ = 0xC0 | r; - } else { - assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); - - if (disp == 0 && r1 != 5) { - *(*p)++ = r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - } else { - if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - - IMM32(p, disp); - } - } - } -} - -static FFTS_INLINE void MOVAPS2(uint8_t **p, uint8_t reg1, uint8_t reg2) -{ - if (reg1 & XMM_REG) { - MOVAPS(p, reg2, reg1, 0, 0); - } else { - MOVAPS(p, reg1, reg2, 0, 1); - } -} - -static FFTS_INLINE void MOVAPS3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) -{ - if (reg1 & XMM_REG) { - MOVAPS(p, (uint8_t) op2, reg1, op3, 0); - } else { - MOVAPS(p, reg1, (uint8_t) op3, op2, 1); - } -} - static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) { uint8_t r1 = (reg1 & 7); @@ -480,7 +405,7 @@ static FFTS_INLINE void generate_transform_init(insns_t **fp) { #ifdef _M_X64 /* generate function */ - MOVAPS2(fp, XMM3, X64_RSI); + x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_RSI, 0); /* set "pointer" to twiddle factors */ x64_mov_reg_membase(*fp, X64_RDI, X64_RCX, 0x20, 8); -- cgit v1.1 From 89560561f49db1b5ed838b934256a2e38566da88 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 12:10:42 +0200 Subject: Add SSE opcode "movdqa" --- x64/x64-codegen.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 23bdcbd..3fb7104 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -1401,6 +1401,15 @@ typedef union { #define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) +#define x64_sse_movdqa_membase_reg(inst, basereg, disp, reg) \ + emit_sse_membase_reg((inst), (basereg), (disp), (reg), 0x66, 0x0f, 0x7f) + +#define x64_sse_movdqa_reg_membase(inst, dreg, basereg, disp) \ + emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6f) + +#define x64_sse_movdqa_reg_reg(inst, dreg, reg) \ + emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6f) + /* Generated from x86-codegen.h */ #define x64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0) -- cgit v1.1 From f8aba2b89af3b1f33ce7d177ada60301c9c9ed3d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 12:12:35 +0200 Subject: Replace MOVDQA with x64_sse_movdqa_reg_membase/64_sse_movdqa_membase_reg --- src/codegen_sse.h | 118 +++++++++--------------------------------------------- 1 file changed, 20 insertions(+), 98 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 4d325bd..f7461ff 100644 --- 
a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -120,84 +120,6 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static FFTS_INLINE void MOVDQA(uint8_t **p, uint8_t reg1, uint8_t reg2, int32_t disp, int is_store) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; - - /* mandatory prefix */ - *(*p)++ = 0x66; - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - if (is_store) { - *(*p)++ = 0x7F; - } else { - *(*p)++ = 0x6F; - } - - r = r1 | (r2 << 3); - - if ((reg1 & XMM_REG) && (reg2 & XMM_REG)) { - assert(disp == 0); - *(*p)++ = 0xC0 | r; - } else { - assert((reg1 & XMM_REG) || (reg2 & XMM_REG)); - - if (disp == 0 && r1 != 5) { - *(*p)++ = r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - } else { - if (disp <= 127 && disp >= -128) { - *(*p)++ = 0x40 | r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - - IMM8(p, disp); - } else { - *(*p)++ = 0x80 | r; - - if (r1 == 4) { - *(*p)++ = 0x24; - } - - IMM32(p, disp); - } - } - } -} - -static FFTS_INLINE void MOVDQA2(uint8_t **p, uint8_t reg1, uint8_t reg2) -{ - if (reg1 & XMM_REG) { - MOVDQA(p, reg2, reg1, 0, 0); - } else { - MOVDQA(p, reg1, reg2, 0, 1); - } -} - -static FFTS_INLINE void MOVDQA3(uint8_t **p, uint8_t reg1, int32_t op2, int32_t op3) -{ - if (reg1 & XMM_REG) { - MOVDQA(p, (uint8_t) op2, reg1, op3, 0); - } else { - MOVDQA(p, reg1, (uint8_t) op3, op2, 1); - } -} - static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg2, uint8_t reg1) { uint8_t r1 = (reg1 & 7); @@ -326,16 +248,16 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) { #ifdef _M_X64 /* restore nonvolatile registers */ - MOVDQA3(fp, XMM6, X64_RSP, 0); - MOVDQA3(fp, XMM7, X64_RSP, 16); - MOVDQA3(fp, XMM8, X64_RSP, 32); - MOVDQA3(fp, XMM9, X64_RSP, 48); - MOVDQA3(fp, XMM10, X64_RSP, 64); - MOVDQA3(fp, XMM11, X64_RSP, 80); - MOVDQA3(fp, XMM12, X64_RSP, 96); - MOVDQA3(fp, XMM13, X64_RSP, 112); - MOVDQA3(fp, XMM14, X64_RSP, 128); - MOVDQA3(fp, XMM15, X64_RSP, 144); + x64_sse_movdqa_reg_membase(*fp, X64_XMM6, X64_RSP, 0); + x64_sse_movdqa_reg_membase(*fp, X64_XMM7, X64_RSP, 16); + x64_sse_movdqa_reg_membase(*fp, X64_XMM8, X64_RSP, 32); + x64_sse_movdqa_reg_membase(*fp, X64_XMM9, X64_RSP, 48); + x64_sse_movdqa_reg_membase(*fp, X64_XMM10, X64_RSP, 64); + x64_sse_movdqa_reg_membase(*fp, X64_XMM11, X64_RSP, 80); + x64_sse_movdqa_reg_membase(*fp, X64_XMM12, X64_RSP, 96); + x64_sse_movdqa_reg_membase(*fp, X64_XMM13, X64_RSP, 112); + x64_sse_movdqa_reg_membase(*fp, X64_XMM14, X64_RSP, 128); + x64_sse_movdqa_reg_membase(*fp, X64_XMM15, X64_RSP, 144); /* restore stack */ x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 168, 8); @@ -377,16 +299,16 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 168, 8); /* to save XMM6-XMM15 registers */ - MOVDQA3(fp, X64_RSP, 0, XMM6); - MOVDQA3(fp, X64_RSP, 16, XMM7); - MOVDQA3(fp, X64_RSP, 32, XMM8); - MOVDQA3(fp, X64_RSP, 48, XMM9); - MOVDQA3(fp, X64_RSP, 64, XMM10); - MOVDQA3(fp, X64_RSP, 80, XMM11); - MOVDQA3(fp, X64_RSP, 96, XMM12); - MOVDQA3(fp, X64_RSP, 112, XMM13); - MOVDQA3(fp, X64_RSP, 128, XMM14); - MOVDQA3(fp, X64_RSP, 144, XMM15); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 0, X64_XMM6); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 16, X64_XMM7); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 32, X64_XMM8); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 48, X64_XMM9); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 64, X64_XMM10); + 
x64_sse_movdqa_membase_reg(*fp, X64_RSP, 80, X64_XMM11); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 96, X64_XMM12); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 112, X64_XMM13); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 128, X64_XMM14); + x64_sse_movdqa_membase_reg(*fp, X64_RSP, 144, X64_XMM15); #else x64_push_reg(*fp, X64_RBP); x64_push_reg(*fp, X64_RBX); -- cgit v1.1 From c665b4cb7a6d8242b4157f14e84f8d47bb014e7f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 12:20:04 +0200 Subject: Replace MULPS with x64_sse_mulps_reg_reg --- src/codegen_sse.h | 43 ++++++++++++------------------------------- 1 file changed, 12 insertions(+), 31 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index f7461ff..c96ab96 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -120,24 +120,6 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static FFTS_INLINE void MULPS(uint8_t **p, uint8_t reg2, uint8_t reg1) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0x59; - *(*p)++ = 0xC0 | r1 | (r2 << 3); -} - static FFTS_INLINE void SHUFPS(uint8_t **p, uint8_t reg2, uint8_t reg1, const int select) { uint8_t r1 = (reg1 & 7); @@ -453,13 +435,13 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x40; *(*fp)++ = 0x10; - MULPS(fp, XMM11, XMM6); - MULPS(fp, XMM9, XMM7); + x64_sse_mulps_reg_reg(*fp, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM7); SHUFPS(fp, XMM6, XMM6, 0xB1); - MULPS(fp, XMM6, XMM8); + x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM8); SHUFPS(fp, XMM7, XMM7, 0xB1); x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM6); - MULPS(fp, XMM8, XMM7); + x64_sse_mulps_reg_reg(*fp, X64_XMM8, X64_XMM7); /* movaps xmm10, xmm11 */ *(*fp)++ = 0x45; @@ -524,10 +506,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x30; x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM10); - MULPS(fp, XMM6, XMM12); + x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM10); - - MULPS(fp, XMM15, XMM13); + x64_sse_mulps_reg_reg(*fp, X64_XMM15, X64_XMM13); /* movaps xmm10, [rax + 0x40] */ *(*fp)++ = 0x44; @@ -543,8 +524,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) SHUFPS(fp, XMM12, XMM12, 0xB1); SHUFPS(fp, XMM13, XMM13, 0xB1); - MULPS(fp, XMM12, XMM14); - MULPS(fp, XMM14, XMM13); + x64_sse_mulps_reg_reg(*fp, X64_XMM12, X64_XMM14); + x64_sse_mulps_reg_reg(*fp, X64_XMM14, X64_XMM13); x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM15, X64_XMM14); @@ -584,10 +565,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) /* input + 6 * input_stride */ x64_alu_reg_imm_size(*fp, X86_ADD, X64_RAX, 0x60, 8); - MULPS(fp, XMM13, XMM7); + x64_sse_mulps_reg_reg(*fp, X64_XMM13, X64_XMM7); x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM15); x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM15); - MULPS(fp, XMM10, XMM8); + x64_sse_mulps_reg_reg(*fp, X64_XMM10, X64_XMM8); x64_sse_subps_reg_reg(*fp, X64_XMM0, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM12); SHUFPS(fp, XMM7, XMM7, 0xB1); @@ -601,8 +582,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x28; *(*fp)++ = 0xE2; - MULPS(fp, XMM7, XMM9); - MULPS(fp, XMM9, XMM8); + x64_sse_mulps_reg_reg(*fp, 
X64_XMM7, X64_XMM9); + x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM8); x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM7); x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); -- cgit v1.1 From d9e01009d828f4ce7a7988bf0f4e2e1dbab32208 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 12:26:56 +0200 Subject: Replace SHUFPS with x64_sse_shufps_reg_reg_imm --- src/codegen_sse.h | 88 ++++++------------------------------------------------- 1 file changed, 9 insertions(+), 79 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index c96ab96..ec8b5ec 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -63,54 +63,8 @@ extern const uint32_t sse_leaf_oo_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; extern const uint32_t sse_leaf_oe_offsets[8]; -#define XMM_REG 0x40 - -#define XMM0 (XMM_REG | 0x0) -#define XMM1 (XMM_REG | 0x1) -#define XMM2 (XMM_REG | 0x2) -#define XMM3 (XMM_REG | 0x3) -#define XMM4 (XMM_REG | 0x4) -#define XMM5 (XMM_REG | 0x5) -#define XMM6 (XMM_REG | 0x6) -#define XMM7 (XMM_REG | 0x7) -#define XMM8 (XMM_REG | 0x8) -#define XMM9 (XMM_REG | 0x9) -#define XMM10 (XMM_REG | 0xa) -#define XMM11 (XMM_REG | 0xb) -#define XMM12 (XMM_REG | 0xc) -#define XMM13 (XMM_REG | 0xd) -#define XMM14 (XMM_REG | 0xe) -#define XMM15 (XMM_REG | 0xf) - #define P(x) (*(*p)++ = x) -/* forward declarations */ -static void IMM8(uint8_t **p, int32_t imm); -static void IMM32(uint8_t **p, int32_t imm); - -static void IMM8(uint8_t **p, int32_t imm) -{ - *(*p)++ = (imm & 0xff); -} - -static void IMM32(uint8_t **p, int32_t imm) -{ - int i; - - for (i = 0; i < 4; i++) { - *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); - } -} - -static void IMM64(uint8_t **p, int64_t imm) -{ - int i; - - for (i = 0; i < 8; i++) { - *(*p)++ = (imm & (0xff << (8 * i))) >> (8 * i); - } -} - static void IMM32_NI(uint8_t *p, int32_t imm) { int i; @@ -120,29 +74,6 @@ static void IMM32_NI(uint8_t *p, int32_t imm) } } -static FFTS_INLINE void SHUFPS(uint8_t **p, uint8_t reg2, uint8_t reg1, const int select) -{ - uint8_t r1 = (reg1 & 7); - uint8_t r2 = (reg2 & 7); - uint8_t r; - - /* REX prefix */ - if ((reg1 & 8) || (reg2 & 8)) { - *(*p)++ = 0x40 | ((reg1 & 8) >> 3) | ((reg2 & 8) >> 1); - } - - /* esacape opcode */ - *(*p)++ = 0x0F; - - /* opcode */ - *(*p)++ = 0xC6; - - r = r1 | (r2 << 3); - - *(*p)++ = 0xC0 | r; - *(*p)++ = (select & 0xFF); -} - static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) { if (count >= 9) { @@ -437,9 +368,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_mulps_reg_reg(*fp, X64_XMM11, X64_XMM6); x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM7); - SHUFPS(fp, XMM6, XMM6, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM6, X64_XMM6, 0xB1); x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM8); - SHUFPS(fp, XMM7, XMM7, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM6); x64_sse_mulps_reg_reg(*fp, X64_XMM8, X64_XMM7); @@ -522,8 +453,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x28; *(*fp)++ = 0xC5; - SHUFPS(fp, XMM12, XMM12, 0xB1); - SHUFPS(fp, XMM13, XMM13, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); x64_sse_mulps_reg_reg(*fp, X64_XMM12, X64_XMM14); x64_sse_mulps_reg_reg(*fp, X64_XMM14, X64_XMM13); x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM12); @@ -571,10 +502,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, 
int sign) x64_sse_mulps_reg_reg(*fp, X64_XMM10, X64_XMM8); x64_sse_subps_reg_reg(*fp, X64_XMM0, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM12); - SHUFPS(fp, XMM7, XMM7, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); - - SHUFPS(fp, XMM8, XMM8, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM8, X64_XMM8, 0xB1); /* movaps xmm12, xmm2 */ *(*fp)++ = 0x44; @@ -594,14 +524,14 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) *(*fp)++ = 0x24; *(*fp)++ = 0x19; - SHUFPS(fp, XMM11, XMM11, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM11, X64_XMM11, 0xB1); /* movaps xmm1, xmm4 */ *(*fp)++ = 0x0F; *(*fp)++ = 0x28; *(*fp)++ = 0xCC; - SHUFPS(fp, XMM6, XMM6, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM6, X64_XMM6, 0xB1); x64_sse_addps_reg_reg(*fp, X64_XMM1, X64_XMM11); x64_sse_subps_reg_reg(*fp, X64_XMM4, X64_XMM11); x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM6); @@ -629,7 +559,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); x64_sse_addps_reg_reg(*fp, X64_XMM4, X64_XMM11); x64_sse_subps_reg_reg(*fp, X64_XMM14, X64_XMM11); - SHUFPS(fp, XMM13, XMM13, 0xB1); + x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); /* movaps [rcx], xmm5 */ /* output + 0 * output_stride */ -- cgit v1.1 From f342eb3215720f9c2fe621e3445484d55c00ff3d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 13:52:52 +0200 Subject: Added x64_sse_movaps_memindex_reg and x64_sse_movaps_reg_memindex --- x64/x64-codegen.h | 44 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 3fb7104..0fbffbe 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -209,6 +209,9 @@ typedef union { x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \ } while (0) +#define x64_memindex_emit(inst, reg, basereg, disp, indexreg, shift) \ + x86_memindex_emit((inst), ((reg) & 0x7), ((basereg) & 0x7), (disp), ((indexreg) & 0x7), (shift)) + #define x64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \ do { \ if (x86_is_imm8((imm))) { \ @@ -950,6 +953,16 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#define emit_sse_memindex_reg_op2(inst, basereg, disp, indexreg, shift, reg, op1, op2) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex (inst, 0, (reg), (indexreg), (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + x64_codegen_post(inst); \ + } while(0) + #define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ x64_codegen_pre(inst); \ x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \ @@ -959,6 +972,16 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#define emit_sse_reg_memindex_op2(inst, dreg, basereg, disp, indexreg, shift, op1, op2) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex (inst, 0, (dreg), (indexreg), (basereg) == X64_RIP ? 
0 : (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \ + x64_codegen_post(inst); \ + } while(0) + /* Three opcode SSE defines */ #define emit_opcode3(inst,op1,op2,op3) do { \ @@ -1391,15 +1414,26 @@ typedef union { #define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) -#define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) +#define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) \ + emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) + +#define x64_sse_movaps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \ + emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x29); + +#define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) \ + emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) -#define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) +#define x64_sse_movaps_reg_memindex(inst, dreg, basereg, disp, indexreg, shift) \ + emit_sse_reg_memindex_op2((inst), (dreg), (basereg), (disp), (indexreg), (shift), 0x0f, 0x28); -#define x64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) +#define x64_sse_movaps_reg_reg(inst, dreg, reg) \ + emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) -#define x64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) +#define x64_sse_movntps_reg_membase(inst, dreg, basereg, disp) \ + emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) -#define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) +#define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) \ + emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) #define x64_sse_movdqa_membase_reg(inst, basereg, disp, reg) \ emit_sse_membase_reg((inst), (basereg), (disp), (reg), 0x66, 0x0f, 0x7f) -- cgit v1.1 From 7fd2a93b0ad374c1377b3504cf55023b90772a58 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 13:55:22 +0200 Subject: Replace "magic bytes" with various macros --- src/codegen_sse.h | 270 ++++++++++++------------------------------------------ 1 file changed, 59 insertions(+), 211 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index ec8b5ec..36e6fb0 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -331,40 +331,19 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x8_soft_loop = *fp; assert(!(((uintptr_t) x8_soft_loop) & 0xF)); - /* movaps xmm9, [rax] */ - /* input + 0 * input_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x08; + /* load [input + 0 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RAX, 0); - /* movaps xmm6, [rcx + rbx*2] */ - /* output + 2 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x34; - *(*fp)++ = 0x59; + /* load [output + 2 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM6, X64_RCX, 0, X64_RBX, 1); - /* movaps xmm11, xmm9 */ - *(*fp)++ = 0x45; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xD9; + x64_sse_movaps_reg_reg(*fp, X64_XMM11, X64_XMM9); - /* movaps xmm7, 
[rcx + rsi] */ - /* output + 3 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x3C; - *(*fp)++ = 0x31; + /* load [output + 3 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM7, X64_RCX, 0, X64_RSI, 0); - /* movaps xmm8, [rax + 0x10] */ - /* input + 1 * input_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x40; - *(*fp)++ = 0x10; + /* load [input + 1 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RAX, 16); x64_sse_mulps_reg_reg(*fp, X64_XMM11, X64_XMM6); x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM7); @@ -373,86 +352,42 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM6); x64_sse_mulps_reg_reg(*fp, X64_XMM8, X64_XMM7); - - /* movaps xmm10, xmm11 */ - *(*fp)++ = 0x45; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xD3; - + x64_sse_movaps_reg_reg(*fp, X64_XMM10, X64_XMM11); x64_sse_addps_reg_reg(*fp, X64_XMM9, X64_XMM8); - /* movaps xmm15, [rax + 0x20] */ - /* input + 2 * input_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x78; - *(*fp)++ = 0x20; + /* load [input + 2 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RAX, 32); x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM9); - /* movaps xmm5, [rcx] */ - /* output + 0 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x29; + /* load [output + 0 * output_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM5, X64_RCX, 0); - /* movaps xmm6,xmm15 */ - *(*fp)++ = 0x41; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xF7; + x64_sse_movaps_reg_reg(*fp, X64_XMM6, X64_XMM15); - /* movaps xmm12, [rcx + rbx*4] */ - /* output + 4 * output_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x24; - *(*fp)++ = 0x99; + /* load [output + 4 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM12, X64_RCX, 0, X64_RBX, 2); - /* movaps xmm2, xmm5 */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xD5; + x64_sse_movaps_reg_reg(*fp, X64_XMM2, X64_XMM5); - /* movaps xmm13, [rcx + rsi*2] */ - /* output + 6 * output_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x2C; - *(*fp)++ = 0x71; + /* load [output + 6 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM13, X64_RCX, 0, X64_RSI, 1); x64_sse_xorps_reg_reg(*fp, X64_XMM11, X64_XMM3); - /* movaps xmm14, [rax + 0x30] */ - /* input + 3 * input_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x70; - *(*fp)++ = 0x30; + /* load [input + 3 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RAX, 48); x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM10); x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM10); x64_sse_mulps_reg_reg(*fp, X64_XMM15, X64_XMM13); - /* movaps xmm10, [rax + 0x40] */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x50; - *(*fp)++ = 0x40; - - /* movaps xmm0, xmm5 */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xC5; + /* load [input + 4 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RAX, 64); + x64_sse_movaps_reg_reg(*fp, X64_XMM0, X64_XMM5); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM12, X64_XMM12, 0xB1); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); x64_sse_mulps_reg_reg(*fp, X64_XMM12, X64_XMM14); @@ -460,40 +395,20 @@ 
static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM12); x64_sse_addps_reg_reg(*fp, X64_XMM15, X64_XMM14); - /* movaps xmm7, [rcx + r10] */ - *(*fp)++ = 0x42; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x3C; - *(*fp)++ = 0x11; + /* load [output + 5 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM7, X64_RCX, 0, X64_R10, 0); - /* movaps xmm13, xmm10 */ - *(*fp)++ = 0x45; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xEA; + x64_sse_movaps_reg_reg(*fp, X64_XMM13, X64_XMM10); - /* movaps xmm8, [rcx + r11] */ - *(*fp)++ = 0x46; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x04; - *(*fp)++ = 0x19; + /* load [output + 7 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM8, X64_RCX, 0, X64_R11, 0); - /* movaps xmm12, xmm6 */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xE6; + x64_sse_movaps_reg_reg(*fp, X64_XMM12, X64_XMM6); - /* movaps xmm9, [rax + 0x50] */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x48; - *(*fp)++ = 0x50; + /* load [input + 5 * input_stride] */ + x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RAX, 80); - /* input + 6 * input_stride */ + /* move input by 6 * input_stride */ x64_alu_reg_imm_size(*fp, X86_ADD, X64_RAX, 0x60, 8); x64_sse_mulps_reg_reg(*fp, X64_XMM13, X64_XMM7); @@ -505,55 +420,25 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM8, X64_XMM8, 0xB1); - - /* movaps xmm12, xmm2 */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xE2; - + x64_sse_movaps_reg_reg(*fp, X64_XMM12, X64_XMM2); x64_sse_mulps_reg_reg(*fp, X64_XMM7, X64_XMM9); x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM8); x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM7); x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); - /* movaps xmm4, [rcx + rbx] */ - /* output + 1 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0x24; - *(*fp)++ = 0x19; + /* load [output + 1 * output_stride] */ + x64_sse_movaps_reg_memindex(*fp, X64_XMM4, X64_RCX, 0, X64_RBX, 0); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM11, X64_XMM11, 0xB1); - - /* movaps xmm1, xmm4 */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xCC; - + x64_sse_movaps_reg_reg(*fp, X64_XMM1, X64_XMM4); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM6, X64_XMM6, 0xB1); x64_sse_addps_reg_reg(*fp, X64_XMM1, X64_XMM11); x64_sse_subps_reg_reg(*fp, X64_XMM4, X64_XMM11); x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM6); x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM6); - - /* movaps xmm11, xmm13 */ - *(*fp)++ = 0x45; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xDD; - - /* movaps xmm14, xmm4 */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xF4; - - /* movaps xmm6, xmm1 */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x28; - *(*fp)++ = 0xF1; - + x64_sse_movaps_reg_reg(*fp, X64_XMM11, X64_XMM13); + x64_sse_movaps_reg_reg(*fp, X64_XMM14, X64_XMM4); + x64_sse_movaps_reg_reg(*fp, X64_XMM6, X64_XMM1); x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM10); x64_sse_addps_reg_reg(*fp, X64_XMM11, X64_XMM10); x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); @@ -561,72 +446,35 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_subps_reg_reg(*fp, X64_XMM14, X64_XMM11); x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); - /* movaps [rcx], xmm5 */ - /* output + 0 * 
output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x29; + /* store [output + 0 * output_stride] */ + x64_sse_movaps_membase_reg(*fp, X64_RCX, 0, X64_XMM5); - /* movaps [rcx + rbx], xmm4 */ - /* output + 1 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x24; - *(*fp)++ = 0x19; + /* store [output + 1 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 0, X64_XMM4); - /* movaps [rcx + rbx*2], xmm2 */ - /* output + 2 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x14; - *(*fp)++ = 0x59; + /* store [output + 2 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 1, X64_XMM2); x64_sse_subps_reg_reg(*fp, X64_XMM1, X64_XMM13); x64_sse_addps_reg_reg(*fp, X64_XMM6, X64_XMM13); - /* movaps [rcx + rsi], xmm1 */ - /* output + 3 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x0C; - *(*fp)++ = 0x31; + /* store [output + 3 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RSI, 0, X64_XMM1); - /* movaps [rcx + rbx*4], xmm0 */ - /* output + 4 * output_stride */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x04; - *(*fp)++ = 0x99; + /* store [output + 4 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 2, X64_XMM0); - /* movaps [rcx + r10], xmm14 */ - /* output + 5 * output_stride */ - *(*fp)++ = 0x46; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x34; - *(*fp)++ = 0x11; + /* store [output + 5 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_R10, 0, X64_XMM14); - /* movaps [rcx + rsi*2], xmm12 */ - /* output + 6 * output_stride */ - *(*fp)++ = 0x44; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x24; - *(*fp)++ = 0x71; + /* store [output + 6 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RSI, 1, X64_XMM12); - /* movaps [rcx + r11], xmm6 */ - /* output + 7 * output_stride */ - *(*fp)++ = 0x42; - *(*fp)++ = 0x0F; - *(*fp)++ = 0x29; - *(*fp)++ = 0x34; - *(*fp)++ = 0x19; + /* store [output + 7 * output_stride] */ + x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_R11, 0, X64_XMM6); - /* add rcx, 0x10 */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x83; - *(*fp)++ = 0xC1; - *(*fp)++ = 0x10; + /* move output by 16 */ + x64_alu_reg_imm_size(*fp, X86_ADD, X64_RCX, 16, 8); /* cmp rcx, rdx */ *(*fp)++ = 0x48; -- cgit v1.1 From 8ef1d3a20ab69ef66f8506e61a08d4cfcc82d3f9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 17:47:01 +0200 Subject: Replace amd64 with x64 in 32 bit header, which means that some macros are in wrong place! 
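For context on the macros touched below: every x86_jump_code/x86_branch variant ultimately picks a short (rel8) or near (rel32) form and emits a displacement measured from the end of the instruction. A minimal sketch of that displacement arithmetic, assuming plain (non-NaCl) code generation; emit_jump is an illustrative name, not a macro from this header:

    #include <stddef.h>

    static void emit_jump(unsigned char **p, unsigned char *target)
    {
        /* rel8 is relative to the end of the 2-byte short form */
        ptrdiff_t disp8 = target - (*p + 2);

        if (disp8 >= -128 && disp8 <= 127) {
            *(*p)++ = 0xEB;                       /* JMP rel8 */
            *(*p)++ = (unsigned char) disp8;
        } else {
            /* rel32 is relative to the end of the 5-byte near form */
            ptrdiff_t disp32 = target - (*p + 5);
            int i;

            *(*p)++ = 0xE9;                       /* JMP rel32 */
            for (i = 0; i < 4; i++)               /* little-endian immediate */
                *(*p)++ = (unsigned char) ((disp32 >> (8 * i)) & 0xff);
        }
    }

This is also why the NaCl variants below emit the body twice: the bundling done by x64_codegen_post can move the instruction, invalidating the IP the displacement was computed against.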
--- x86/x86-codegen.h | 66 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/x86/x86-codegen.h b/x86/x86-codegen.h index fec8ccb..0052076 100644 --- a/x86/x86-codegen.h +++ b/x86/x86-codegen.h @@ -332,7 +332,7 @@ typedef union { #if defined(__native_client_codegen__) && defined(TARGET_AMD64) #define x86_membase_emit(inst,r,basereg,disp) \ do { \ - amd64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \ + x64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \ } while (0) #else /* __default_codegen__ || 32-bit NaCl codegen */ #define x86_membase_emit(inst,r,basereg,disp) \ @@ -506,7 +506,7 @@ typedef union { /* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */ #define x86_prefix(inst,p) \ do { \ - amd64_nacl_tag_legacy_prefix((inst)); \ + x64_nacl_tag_legacy_prefix((inst)); \ *(inst)++ =(unsigned char) (p); \ } while (0) @@ -1743,21 +1743,21 @@ typedef union { } while (0) #elif defined(TARGET_AMD64) /* These macros are used directly from mini-amd64.c and other */ -/* amd64 specific files, so they need to be instrumented directly. */ +/* x64 specific files, so they need to be instrumented directly. */ #define x86_jump32(inst,imm) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ *(inst)++ = (unsigned char)0xe9; \ x86_imm_emit32 ((inst), (imm)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #define x86_jump8(inst,imm) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ *(inst)++ = (unsigned char)0xeb; \ x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #endif @@ -1826,27 +1826,29 @@ typedef union { } \ } while (0) -#if defined(__default_codegen__) -#define x86_jump_code(inst,target) \ - do { \ - x86_jump_code_body((inst),(target)); \ - } while (0) -#elif defined(__native_client_codegen__) && defined(TARGET_X86) +#if defined(__native_client_codegen__) +#if defined(TARGET_X86) #define x86_jump_code(inst,target) \ do { \ guint8* jump_start = (inst); \ x86_jump_code_body((inst),(target)); \ x86_patch(jump_start, (target)); \ } while (0) -#elif defined(__native_client_codegen__) && defined(TARGET_AMD64) +#elif defined(TARGET_AMD64) #define x86_jump_code(inst,target) \ do { \ /* jump_code_body is used twice because there are offsets */ \ /* calculated based on the IP, which can change after the */ \ - /* call to amd64_codegen_post */ \ - amd64_codegen_pre(inst); \ + /* call to x64_codegen_post */ \ + x64_codegen_pre(inst); \ x86_jump_code_body((inst),(target)); \ - inst = amd64_codegen_post(inst); \ + inst = x64_codegen_post(inst); \ + x86_jump_code_body((inst),(target)); \ + } while (0) +#endif +#else +#define x86_jump_code(inst,target) \ + do { \ x86_jump_code_body((inst),(target)); \ } while (0) #endif /* __native_client_codegen__ */ @@ -1885,27 +1887,27 @@ typedef union { } while (0) #elif defined(TARGET_AMD64) /* These macros are used directly from mini-amd64.c and other */ -/* amd64 specific files, so they need to be instrumented directly. 
*/ #define x86_branch8(inst,cond,imm,is_signed) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)]; \ else \ *(inst)++ = x86_cc_unsigned_map [(cond)]; \ x86_imm_emit8 ((inst), (imm)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #define x86_branch32(inst,cond,imm,is_signed) \ do { \ - amd64_codegen_pre(inst); \ + x64_codegen_pre(inst); \ *(inst)++ = (unsigned char)0x0f; \ if ((is_signed)) \ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \ else \ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \ x86_imm_emit32 ((inst), (imm)); \ - amd64_codegen_post(inst); \ + x64_codegen_post(inst); \ } while (0) #endif @@ -1928,7 +1930,7 @@ typedef union { } while (0) #elif defined(TARGET_AMD64) /* This macro is used directly from mini-amd64.c and other */ -/* amd64 specific files, so it needs to be instrumented directly. */ +/* x64 specific files, so it needs to be instrumented directly. */ #define x86_branch_body(inst,cond,target,is_signed) \ do { \ @@ -1941,20 +1943,20 @@ typedef union { } \ } while (0) -#if defined(__default_codegen__) -#define x86_branch(inst,cond,target,is_signed) \ - do { \ - x86_branch_body((inst),(cond),(target),(is_signed)); \ - } while (0) -#elif defined(__native_client_codegen__) +#if defined(__native_client_codegen__) #define x86_branch(inst,cond,target,is_signed) \ do { \ /* branch_body is used twice because there are offsets */ \ /* calculated based on the IP, which can change after */ \ - /* the call to amd64_codegen_post */ \ - amd64_codegen_pre(inst); \ + /* the call to x64_codegen_post */ \ + x64_codegen_pre(inst); \ x86_branch_body((inst),(cond),(target),(is_signed)); \ - inst = amd64_codegen_post(inst); \ + inst = x64_codegen_post(inst); \ + x86_branch_body((inst),(cond),(target),(is_signed)); \ + } while (0) +#else +#define x86_branch(inst,cond,target,is_signed) \ + do { \ x86_branch_body((inst),(cond),(target),(is_signed)); \ } while (0) #endif /* __native_client_codegen__ */ -- cgit v1.1 From 6d85fa94dd825b6aa4eebb3bfc3d5adc67f5177b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 9 Nov 2014 17:51:01 +0200 Subject: Removed last bits of magic from "generate_size8_base_case". Replace x64_call_imm with x64_call_code. 
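Two mechanical cleanups happen below. In codegen.c the call sites stop computing the rel32 displacement by hand ((char*) x_4_addr - ((char*) fp + 4)) and pass the absolute target to x64_call_code instead. In codegen_sse.h the remaining hand-emitted lea/cmp/jne bytes become x64_lea_memindex, x64_alu_reg_reg_size and x64_branch_size calls, all of which funnel into ModRM/SIB encoding. A rough sketch of that byte layout, assuming the mod=01 (8-bit displacement) form and ignoring the special cases (RSP cannot be an index, RBP as base forces a displacement); emit_memindex_disp8 is an illustrative name, not this header's x64_memindex_emit:

    #include <stdint.h>

    static void emit_memindex_disp8(uint8_t **p, int reg, int basereg,
                                    int indexreg, int shift, int8_t disp)
    {
        /* ModRM: mod=01 (disp8 follows), reg field, rm=100 selects a SIB byte */
        *(*p)++ = (uint8_t) (0x40 | ((reg & 7) << 3) | 0x04);

        /* SIB: scale in bits 7-6, index in bits 5-3, base in bits 2-0 */
        *(*p)++ = (uint8_t) ((shift << 6) | ((indexreg & 7) << 3) | (basereg & 7));

        /* the 8-bit displacement itself */
        *(*p)++ = (uint8_t) disp;
    }

The high bit of extended registers such as R8-R11 does not fit in these three-bit fields; it travels in a REX prefix, which is what x64_emit_rex places in front of the opcode.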
--- src/codegen.c | 4 +- src/codegen_sse.h | 258 +++++++++++++++++++++++++----------------------------- 2 files changed, 122 insertions(+), 140 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 92f7553..efa8e9a 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -397,9 +397,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N } if (pps[0] == 2 * leaf_N) { - x64_call_imm(fp, (char*) x_4_addr - ((char*) fp + 4)); + x64_call_code(fp, x_4_addr); } else { - x64_call_imm(fp, (char*) x_8_addr - ((char*) fp + 4)); + x64_call_code(fp, x_8_addr); } pAddr = 4 * pps[1]; diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 36e6fb0..bbc91b9 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -34,6 +34,7 @@ #ifndef FFTS_CODEGEN_SSE_H #define FFTS_CODEGEN_SSE_H +#define TARGET_AMD64 #include "arch/x64/x64-codegen.h" #include @@ -275,6 +276,7 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) { + insns_t *ins; insns_t *x_8_addr; #ifdef _M_X64 insns_t *x8_soft_loop; @@ -282,223 +284,203 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) size_t len; #endif + /* to avoid deferring */ + ins = *fp; + /* align call destination */ - ffts_align_mem16(fp, 0); - x_8_addr = *fp; + ffts_align_mem16(&ins, 0); + x_8_addr = ins; /* align loop/jump destination */ #ifdef _M_X64 - ffts_align_mem16(fp, 6); + ffts_align_mem16(&ins, 6); #else - ffts_align_mem16(fp, 5); + ffts_align_mem16(&ins, 5); #endif #ifdef _M_X64 /* input */ - x64_mov_reg_reg(*fp, X64_RAX, X64_RDI, 8); + x64_mov_reg_reg(ins, X64_RAX, X64_RDI, 8); /* output */ - x64_mov_reg_reg(*fp, X64_RCX, X64_R8, 8); - - /* lea rdx, [r8 + rbx] */ - /* loop stop (output + output_stride) */ - *(*fp)++ = 0x49; - *(*fp)++ = 0x8D; - *(*fp)++ = 0x14; - *(*fp)++ = 0x18; - - /* lea rsi, [rbx + rbx*2] */ - /* 3 * output_stride */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x8D; - *(*fp)++ = 0x34; - *(*fp)++ = 0x5B; - - /* lea r10, [rbx + rbx*4] */ - /* 5 * output_stride */ - *(*fp)++ = 0x4C; - *(*fp)++ = 0x8D; - *(*fp)++ = 0x14; - *(*fp)++ = 0x9B; - - /* lea r11, [rsi + rbx*4] */ - /* 7 * output_stride */ - *(*fp)++ = 0x4C; - *(*fp)++ = 0x8D; - *(*fp)++ = 0x1C; - *(*fp)++ = 0x9E; - - x8_soft_loop = *fp; + x64_mov_reg_reg(ins, X64_RCX, X64_R8, 8); + + /* loop stop (RDX = output + output_stride) */ + x64_lea_memindex(ins, X64_RDX, X64_R8, 0, X64_RBX, 0); + + /* RSI = 3 * output_stride */ + x64_lea_memindex(ins, X64_RSI, X64_RBX, 0, X64_RBX, 1); + + /* R10 = 5 * output_stride */ + x64_lea_memindex(ins, X64_R10, X64_RBX, 0, X64_RBX, 2); + + /* R11 = 7 * output_stride */ + x64_lea_memindex(ins, X64_R11, X64_RSI, 0, X64_RBX, 2); + + /* beginning of the loop (make sure it's 16 byte aligned) */ + x8_soft_loop = ins; assert(!(((uintptr_t) x8_soft_loop) & 0xF)); /* load [input + 0 * input_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RAX, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 0); /* load [output + 2 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM6, X64_RCX, 0, X64_RBX, 1); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RCX, 0, X64_RBX, 1); - x64_sse_movaps_reg_reg(*fp, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); /* load [output + 3 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM7, X64_RCX, 0, X64_RSI, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_RSI, 0); /* load [input + 1 * input_stride] */ - 
x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RAX, 16); - - x64_sse_mulps_reg_reg(*fp, X64_XMM11, X64_XMM6); - x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM7); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM6, X64_XMM6, 0xB1); - x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM8); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM6); - x64_sse_mulps_reg_reg(*fp, X64_XMM8, X64_XMM7); - x64_sse_movaps_reg_reg(*fp, X64_XMM10, X64_XMM11); - x64_sse_addps_reg_reg(*fp, X64_XMM9, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RAX, 16); + + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); /* load [input + 2 * input_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RAX, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RAX, 32); - x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); - x64_sse_subps_reg_reg(*fp, X64_XMM11, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); /* load [output + 0 * output_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM5, X64_RCX, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RCX, 0); - x64_sse_movaps_reg_reg(*fp, X64_XMM6, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15); /* load [output + 4 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM12, X64_RCX, 0, X64_RBX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RCX, 0, X64_RBX, 2); - x64_sse_movaps_reg_reg(*fp, X64_XMM2, X64_XMM5); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); /* load [output + 6 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM13, X64_RCX, 0, X64_RSI, 1); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RCX, 0, X64_RSI, 1); - x64_sse_xorps_reg_reg(*fp, X64_XMM11, X64_XMM3); + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); /* load [input + 3 * input_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RAX, 48); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RAX, 48); - x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM10); - x64_sse_mulps_reg_reg(*fp, X64_XMM6, X64_XMM12); - x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM10); - x64_sse_mulps_reg_reg(*fp, X64_XMM15, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13); /* load [input + 4 * input_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RAX, 64); + x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RAX, 64); - x64_sse_movaps_reg_reg(*fp, X64_XMM0, X64_XMM5); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); - x64_sse_mulps_reg_reg(*fp, X64_XMM12, X64_XMM14); - x64_sse_mulps_reg_reg(*fp, X64_XMM14, X64_XMM13); - x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM12); - x64_sse_addps_reg_reg(*fp, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + 
x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); /* load [output + 5 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM7, X64_RCX, 0, X64_R10, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_R10, 0); - x64_sse_movaps_reg_reg(*fp, X64_XMM13, X64_XMM10); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10); /* load [output + 7 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM8, X64_RCX, 0, X64_R11, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RCX, 0, X64_R11, 0); - x64_sse_movaps_reg_reg(*fp, X64_XMM12, X64_XMM6); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6); /* load [input + 5 * input_stride] */ - x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RAX, 80); + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 80); /* move input by 6 * input_stride */ - x64_alu_reg_imm_size(*fp, X86_ADD, X64_RAX, 0x60, 8); - - x64_sse_mulps_reg_reg(*fp, X64_XMM13, X64_XMM7); - x64_sse_subps_reg_reg(*fp, X64_XMM6, X64_XMM15); - x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM15); - x64_sse_mulps_reg_reg(*fp, X64_XMM10, X64_XMM8); - x64_sse_subps_reg_reg(*fp, X64_XMM0, X64_XMM12); - x64_sse_addps_reg_reg(*fp, X64_XMM5, X64_XMM12); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_xorps_reg_reg(*fp, X64_XMM6, X64_XMM3); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM8, X64_XMM8, 0xB1); - x64_sse_movaps_reg_reg(*fp, X64_XMM12, X64_XMM2); - x64_sse_mulps_reg_reg(*fp, X64_XMM7, X64_XMM9); - x64_sse_mulps_reg_reg(*fp, X64_XMM9, X64_XMM8); - x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM7); - x64_sse_addps_reg_reg(*fp, X64_XMM10, X64_XMM9); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 0x60, 8); + + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); /* load [output + 1 * output_stride] */ - x64_sse_movaps_reg_memindex(*fp, X64_XMM4, X64_RCX, 0, X64_RBX, 0); - - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM11, X64_XMM11, 0xB1); - x64_sse_movaps_reg_reg(*fp, X64_XMM1, X64_XMM4); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM6, X64_XMM6, 0xB1); - x64_sse_addps_reg_reg(*fp, X64_XMM1, X64_XMM11); - x64_sse_subps_reg_reg(*fp, X64_XMM4, X64_XMM11); - x64_sse_addps_reg_reg(*fp, X64_XMM12, X64_XMM6); - x64_sse_subps_reg_reg(*fp, X64_XMM2, X64_XMM6); - x64_sse_movaps_reg_reg(*fp, X64_XMM11, X64_XMM13); - x64_sse_movaps_reg_reg(*fp, X64_XMM14, X64_XMM4); - x64_sse_movaps_reg_reg(*fp, X64_XMM6, X64_XMM1); - x64_sse_subps_reg_reg(*fp, X64_XMM13, X64_XMM10); - x64_sse_addps_reg_reg(*fp, X64_XMM11, X64_XMM10); - x64_sse_xorps_reg_reg(*fp, X64_XMM13, X64_XMM3); - x64_sse_addps_reg_reg(*fp, X64_XMM4, X64_XMM11); - x64_sse_subps_reg_reg(*fp, X64_XMM14, X64_XMM11); - x64_sse_shufps_reg_reg_imm(*fp, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_movaps_reg_memindex(ins, 
X64_XMM4, X64_RCX, 0, X64_RBX, 0); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); /* store [output + 0 * output_stride] */ - x64_sse_movaps_membase_reg(*fp, X64_RCX, 0, X64_XMM5); + x64_sse_movaps_membase_reg(ins, X64_RCX, 0, X64_XMM5); /* store [output + 1 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 0, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 0, X64_XMM4); /* store [output + 2 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 1, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 1, X64_XMM2); - x64_sse_subps_reg_reg(*fp, X64_XMM1, X64_XMM13); - x64_sse_addps_reg_reg(*fp, X64_XMM6, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13); /* store [output + 3 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RSI, 0, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 0, X64_XMM1); /* store [output + 4 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RBX, 2, X64_XMM0); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 2, X64_XMM0); /* store [output + 5 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_R10, 0, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R10, 0, X64_XMM14); /* store [output + 6 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_RSI, 1, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 1, X64_XMM12); /* store [output + 7 * output_stride] */ - x64_sse_movaps_memindex_reg(*fp, X64_RCX, 0, X64_R11, 0, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R11, 0, X64_XMM6); /* move output by 16 */ - x64_alu_reg_imm_size(*fp, X86_ADD, X64_RCX, 16, 8); - - /* cmp rcx, rdx */ - *(*fp)++ = 0x48; - *(*fp)++ = 0x39; - *(*fp)++ = 0xD1; + x64_alu_reg_imm_size(ins, X86_ADD, X64_RCX, 16, 8); - /* jne [x8_soft_loop] */ - *(*fp)++ = 0x0F; - *(*fp)++ = 0x85; - *(*fp)++ = 0x9E; - *(*fp)++ = 0xFE; - *(*fp)++ = 0xFF; - *(*fp)++ = 0xFF; + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RDX, 8); + x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4); /* ret */ - x64_ret(*fp); + x64_ret(ins); #else /* copy function */ assert((char*) x8_soft_end >= (char*) x8_soft); len = (char*) x8_soft_end - (char*) x8_soft; - memcpy(*fp, x8_soft, len); - *fp += len; + memcpy(ins, x8_soft, len); + ins += len; #endif + *fp = ins; return x_8_addr; } -- cgit v1.1 From 220ec967d41b2b53d7b66fa77e748cd127f73a2d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 10 Nov 2014 00:20:33 +0200 Subject: Generate function in "generate_size4_base_case" --- src/codegen_sse.h | 89 
++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 79 insertions(+), 10 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index bbc91b9..fcab9f3 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -258,26 +258,96 @@ static FFTS_INLINE void generate_transform_init(insns_t **fp) static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) { - insns_t *x_4_addr; + insns_t *ins; + insns_t *x4_addr; size_t len; + /* to avoid deferring */ + ins = *fp; + /* align call destination */ - ffts_align_mem16(fp, 0); - x_4_addr = *fp; + ffts_align_mem16(&ins, 0); + x4_addr = ins; +#ifdef _M_X64 + /* generate function */ + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R8, 64); + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 96); + x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_R8, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_RDI, 0); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_RDI, 16); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM6); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM5); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32); + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112); + x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM8); + x64_sse_movaps_membase_reg(ins, X64_R8, 0, X64_XMM7); + x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8); + x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9); + x64_sse_movaps_membase_reg(ins, X64_R8, 96, X64_XMM10); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RDI, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RDI, 48); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM1); + 
x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM5); + x64_sse_movaps_membase_reg(ins, X64_R8, 16, X64_XMM1); + x64_sse_movaps_membase_reg(ins, X64_R8, 48, X64_XMM2); + x64_sse_movaps_membase_reg(ins, X64_R8, 80, X64_XMM4); + x64_sse_movaps_membase_reg(ins, X64_R8, 112, X64_XMM5); + x64_ret(ins); +#else /* copy function */ assert((char*) x8_soft > (char*) x4); len = (char*) x8_soft - (char*) x4; - memcpy(*fp, x4, len); - *fp += len; + memcpy(ins, x4, len); + ins += len; +#endif - return x_4_addr; + *fp = ins; + return x4_addr; } static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) { insns_t *ins; - insns_t *x_8_addr; + insns_t *x8_addr; #ifdef _M_X64 insns_t *x8_soft_loop; #else @@ -289,7 +359,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) /* align call destination */ ffts_align_mem16(&ins, 0); - x_8_addr = ins; + x8_addr = ins; /* align loop/jump destination */ #ifdef _M_X64 @@ -470,7 +540,6 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RDX, 8); x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4); - /* ret */ x64_ret(ins); #else /* copy function */ @@ -481,7 +550,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) #endif *fp = ins; - return x_8_addr; + return x8_addr; } #endif /* FFTS_CODEGEN_SSE_H */ \ No newline at end of file -- cgit v1.1 From 11fbf3ec5a1a7120c5f790300d9eaf12adde8296 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 10 Nov 2014 15:34:07 +0200 Subject: Add size parameter to emit_sse_reg_membase_op2/emit_sse_membase_reg_op2 --- x64/x64-codegen.h | 91 ++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 34 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 0fbffbe..ad53898 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -925,33 +925,39 @@ typedef union { //TODO Reorganize SSE opcode defines. 
/* Two opcode SSE defines */ +#define emit_sse_reg_reg_op2(inst, dreg, reg, op1, op2) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (reg), (op1), (op2), 0) -#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \ - x64_codegen_pre(inst); \ - x64_emit_rex ((inst), size, (dreg), 0, (reg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - x86_reg_emit ((inst), (dreg), (reg)); \ - x64_codegen_post(inst); \ -} while (0) +#define emit_sse_reg_reg_op2_size(inst, dreg, reg, op1, op2, size) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex ((inst), size, (dreg), 0, (reg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x86_reg_emit ((inst), (dreg), (reg)); \ + x64_codegen_post(inst); \ + } while (0) -#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0) +#define emit_sse_reg_reg_op2_imm(inst, dreg, reg, op1, op2, imm) \ + do { \ + x64_codegen_pre(inst); \ + emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ + x86_imm_emit8 ((inst), (imm)); \ + x64_codegen_post(inst); \ + } while (0) -#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \ - x64_codegen_pre(inst); \ - emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \ - x86_imm_emit8 ((inst), (imm)); \ - x64_codegen_post(inst); \ -} while (0) +#define emit_sse_membase_reg_op2(inst, basereg, disp, reg, op1, op2) \ + emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), (op1), (op2), 0) -#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \ - x64_codegen_pre(inst); \ - x64_emit_rex ((inst), 0, (reg), 0, (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - x64_membase_emit ((inst), (reg), (basereg), (disp)); \ - x64_codegen_post(inst); \ -} while (0) +#define emit_sse_membase_reg_op2_size(inst, basereg, disp, reg, op1, op2, size) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex ((inst), (size), (reg), 0, (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x64_membase_emit ((inst), (reg), (basereg), (disp)); \ + x64_codegen_post(inst); \ + } while (0) #define emit_sse_memindex_reg_op2(inst, basereg, disp, indexreg, shift, reg, op1, op2) \ do { \ @@ -963,14 +969,18 @@ typedef union { x64_codegen_post(inst); \ } while(0) -#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \ - x64_codegen_pre(inst); \ - x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \ - *(inst)++ = (unsigned char)(op1); \ - *(inst)++ = (unsigned char)(op2); \ - x64_membase_emit ((inst), (dreg), (basereg), (disp)); \ - x64_codegen_post(inst); \ -} while (0) +#define emit_sse_reg_membase_op2(inst, dreg, basereg, disp, op1, op2) \ + emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), (op1), (op2), 0) + +#define emit_sse_reg_membase_op2_size(inst, dreg, basereg, disp, op1, op2, size) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex ((inst), (size), (dreg), 0, (basereg) == X64_RIP ? 
0 : (basereg)); \ + *(inst)++ = (unsigned char)(op1); \ + *(inst)++ = (unsigned char)(op2); \ + x64_membase_emit ((inst), (dreg), (basereg), (disp)); \ + x64_codegen_post(inst); \ + } while (0) #define emit_sse_reg_memindex_op2(inst, dreg, basereg, disp, indexreg, shift, op1, op2) \ do { \ @@ -983,7 +993,6 @@ typedef union { } while(0) /* Three opcode SSE defines */ - #define emit_opcode3(inst,op1,op2,op3) do { \ *(inst)++ = (unsigned char)(op1); \ *(inst)++ = (unsigned char)(op2); \ @@ -1410,19 +1419,33 @@ typedef union { #define x64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) -#define x64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) +#define x64_sse_movups_membase_reg(inst, basereg, disp, reg) \ + emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) + +#define x64_sse_movups_membase_reg_size(inst, basereg, disp, reg, size) \ + emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x11, (size)) -#define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) +#define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) \ + emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10) + +#define x64_sse_movups_reg_membase_size(inst, dreg, basereg, disp, size) \ + emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x10, (size)) #define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) \ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29) +#define x64_sse_movaps_membase_reg_size(inst, basereg, disp, reg, size) \ + emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x29, (size)) + #define x64_sse_movaps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \ emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x29); #define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) \ emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28) +#define x64_sse_movaps_reg_membase_size(inst, dreg, basereg, disp, size) \ + emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x28, (size)) + #define x64_sse_movaps_reg_memindex(inst, dreg, basereg, disp, indexreg, shift) \ emit_sse_reg_memindex_op2((inst), (dreg), (basereg), (disp), (indexreg), (shift), 0x0f, 0x28); -- cgit v1.1 From 20766e39cbc37bd5fabe1a144a270a99541955b2 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 10 Nov 2014 17:07:31 +0200 Subject: Replace movdqa with movaps which is one byte shorter. Don't need RDI register as R9 is saved by caller. 
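The byte saving comes from the opcode maps: the aligned SSE moves live in the plain 0F map, while the integer movdqa forms additionally require a mandatory 66 prefix. Illustrative encodings, assuming a low XMM register and an RSP base so that no REX prefix is needed:

    /*
     *   movaps [rsp], xmm6   ->  0F 29 34 24       (4 bytes)
     *   movdqa [rsp], xmm6   ->  66 0F 7F 34 24    (5 bytes)
     *
     * For plain spills and reloads of whole XMM registers the two are
     * interchangeable, so the movaps form saves one byte per instruction.
     */

As for the register swap: under the Win64 calling convention RDI is nonvolatile, so borrowing it obliged the prologue/epilogue to save and restore it, whereas R9 (the fourth argument register) is volatile and may be clobbered freely.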
--- src/codegen.c | 31 ++++++++------------- src/codegen_sse.h | 83 ++++++++++++++++++++++++++++--------------------------- src/sse_win64.s | 36 ++++++++++++------------ 3 files changed, 71 insertions(+), 79 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index efa8e9a..6c6c887 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -150,19 +150,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N #ifdef __arm__ start = generate_prologue(&fp, p); -#else - start = generate_prologue(&fp, p); - - /* assign loop counter register */ - loop_count = 4 * p->i0; -#ifdef _M_X64 - x86_mov_reg_imm(fp, X86_EBX, loop_count); -#else - x86_mov_reg_imm(fp, X86_ECX, loop_count); -#endif -#endif -#ifdef __arm__ #ifdef HAVE_NEON memcpy(fp, neon_ee, neon_oo - neon_ee); if (sign < 0) { @@ -201,24 +189,27 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += (vfp_o - vfp_e) / 4; #endif #else - //fprintf(stderr, "Body start address = %016p\n", start); + /* generate function */ + start = generate_prologue(&fp, p); + loop_count = 4 * p->i0; #ifdef _M_X64 - /* generate function */ + /* set loop counter */ + x86_mov_reg_imm(fp, X86_EBX, loop_count); /* clear */ x86_clear_reg(fp, X86_EAX); /* set "pointer" to offsets */ - x64_mov_reg_membase(fp, X64_RDI, X64_RCX, 0x0, 8); + x64_mov_reg_membase(fp, X64_R9, X64_RCX, 0x0, 8); /* set "pointer" to constants */ x64_mov_reg_membase(fp, X64_RSI, X64_RCX, 0xE0, 8); - - /* align loop/jump destination */ - ffts_align_mem16(&fp, 8); #else - /* copy function */ + /* set loop counter */ + x86_mov_reg_imm(fp, X86_ECX, loop_count); + + /* copy function */ assert((char*) leaf_ee > (char*) leaf_ee_init); len = (char*) leaf_ee - (char*) leaf_ee_init; memcpy(fp, leaf_ee_init, (size_t) len); @@ -390,7 +381,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - x64_alu_reg_imm_size(fp, X86_ADD, X64_RDI, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R9, offset, 8); #else x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); #endif diff --git a/src/codegen_sse.h b/src/codegen_sse.h index fcab9f3..40bfa3f 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -162,24 +162,24 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) { #ifdef _M_X64 /* restore nonvolatile registers */ - x64_sse_movdqa_reg_membase(*fp, X64_XMM6, X64_RSP, 0); - x64_sse_movdqa_reg_membase(*fp, X64_XMM7, X64_RSP, 16); - x64_sse_movdqa_reg_membase(*fp, X64_XMM8, X64_RSP, 32); - x64_sse_movdqa_reg_membase(*fp, X64_XMM9, X64_RSP, 48); - x64_sse_movdqa_reg_membase(*fp, X64_XMM10, X64_RSP, 64); - x64_sse_movdqa_reg_membase(*fp, X64_XMM11, X64_RSP, 80); - x64_sse_movdqa_reg_membase(*fp, X64_XMM12, X64_RSP, 96); - x64_sse_movdqa_reg_membase(*fp, X64_XMM13, X64_RSP, 112); - x64_sse_movdqa_reg_membase(*fp, X64_XMM14, X64_RSP, 128); - x64_sse_movdqa_reg_membase(*fp, X64_XMM15, X64_RSP, 144); + x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, -64, 8); + x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, -56, 8); + + x64_sse_movaps_reg_membase(*fp, X64_XMM6, X64_RSP, -48); + x64_sse_movaps_reg_membase(*fp, X64_XMM7, X64_RSP, -32); + x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RSP, -16); + x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RSP, 0); + x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RSP, 16); + x64_sse_movaps_reg_membase(*fp, X64_XMM11, X64_RSP, 32); + x64_sse_movaps_reg_membase(*fp, X64_XMM12, X64_RSP, 48); + x64_sse_movaps_reg_membase(*fp, X64_XMM13, X64_RSP, 64); + + 
/* restore the last 2 registers from the shadow space */ + x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RSP, 96); + x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RSP, 112); /* restore stack */ - x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 168, 8); - - /* restore the last 3 registers from the shadow space */ - x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, 8, 8); - x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, 16, 8); - x64_mov_reg_membase(*fp, X64_RDI, X64_RSP, 24, 8); + x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 88, 8); #else x64_pop_reg(*fp, X64_R15); x64_pop_reg(*fp, X64_R14); @@ -204,25 +204,24 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) /* save nonvolatile registers */ #ifdef _M_X64 - /* use the shadow space to save first 3 registers */ - x64_mov_membase_reg(*fp, X64_RSP, 8, X64_RBX, 8); - x64_mov_membase_reg(*fp, X64_RSP, 16, X64_RSI, 8); - x64_mov_membase_reg(*fp, X64_RSP, 24, X64_RDI, 8); - - /* reserve space.. */ - x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 168, 8); - - /* to save XMM6-XMM15 registers */ - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 0, X64_XMM6); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 16, X64_XMM7); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 32, X64_XMM8); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 48, X64_XMM9); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 64, X64_XMM10); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 80, X64_XMM11); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 96, X64_XMM12); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 112, X64_XMM13); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 128, X64_XMM14); - x64_sse_movdqa_membase_reg(*fp, X64_RSP, 144, X64_XMM15); + /* reserve space to save XMM6-XMM15 registers */ + x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 88, 8); + + x64_mov_membase_reg(*fp, X64_RSP, -64, X64_RBX, 8); + x64_mov_membase_reg(*fp, X64_RSP, -56, X64_RSI, 8); + + x64_sse_movaps_membase_reg(*fp, X64_RSP, -48, X64_XMM6); + x64_sse_movaps_membase_reg(*fp, X64_RSP, -32, X64_XMM7); + x64_sse_movaps_membase_reg(*fp, X64_RSP, -16, X64_XMM8); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 0, X64_XMM9); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 16, X64_XMM10); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 32, X64_XMM11); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 48, X64_XMM12); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 64, X64_XMM13); + + /* use the shadow space to save last 2 registers */ + x64_sse_movaps_membase_reg(*fp, X64_RSP, 96, X64_XMM14); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 112, X64_XMM15); #else x64_push_reg(*fp, X64_RBP); x64_push_reg(*fp, X64_RBX); @@ -244,7 +243,7 @@ static FFTS_INLINE void generate_transform_init(insns_t **fp) x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_RSI, 0); /* set "pointer" to twiddle factors */ - x64_mov_reg_membase(*fp, X64_RDI, X64_RCX, 0x20, 8); + x64_mov_reg_membase(*fp, X64_R9, X64_RCX, 0x20, 8); #else size_t len; @@ -260,7 +259,9 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) { insns_t *ins; insns_t *x4_addr; +#ifndef _M_X64 size_t len; +#endif /* to avoid deferring */ ins = *fp; @@ -274,10 +275,10 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R8, 64); x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 96); x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_R8, 0); - x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_RDI, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R9, 0); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); 
- x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_RDI, 16); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R9, 16); x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM6); x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); @@ -302,10 +303,10 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8); x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9); x64_sse_movaps_membase_reg(ins, X64_R8, 96, X64_XMM10); - x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RDI, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R9, 32); x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80); x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); - x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RDI, 48); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R9, 48); x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM0); x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); @@ -370,7 +371,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) #ifdef _M_X64 /* input */ - x64_mov_reg_reg(ins, X64_RAX, X64_RDI, 8); + x64_mov_reg_reg(ins, X64_RAX, X64_R9, 8); /* output */ x64_mov_reg_reg(ins, X64_RCX, X64_R8, 8); diff --git a/src/sse_win64.s b/src/sse_win64.s index c92358f..6b71a2f 100644 --- a/src/sse_win64.s +++ b/src/sse_win64.s @@ -58,12 +58,12 @@ leaf_ee_init: # rdx is 'in' base pointer # rbx is loop max count # rsi is constants pointer -# rdi is offsets pointer +# r9 is offsets pointer # r8 is 'out' base pointer # scratch: rax r10 r11 xorl %eax, %eax - movq (%rcx), %rdi + movq (%rcx), %r9 movq 0xe0(%rcx), %rsi # _leaf_ee + 8 needs 16 byte alignment @@ -105,7 +105,7 @@ LEAF_EE_const_7: movaps %xmm3, %xmm15 #83.5 shufps $177, %xmm12, %xmm12 #83.5 movaps %xmm7, %xmm4 #83.5 - movslq (%rdi, %rax, 4), %r10 #83.44 + movslq (%r9, %rax, 4), %r10 #83.44 subps %xmm13, %xmm10 #83.5 subps %xmm14, %xmm3 #83.5 addps %xmm11, %xmm5 #83.5 @@ -146,7 +146,7 @@ LEAF_EE_const_7: movaps %xmm2, %xmm3 #83.5 shufps $177, %xmm12, %xmm12 #83.5 movaps %xmm6, %xmm9 #83.5 - movslq 8(%rdi, %rax, 4), %r11 #83.59 + movslq 8(%r9, %rax, 4), %r11 #83.59 movlhps %xmm4, %xmm3 #83.5 addq $4, %rax shufps $238, %xmm4, %xmm2 #83.5 @@ -205,7 +205,7 @@ LEAF_OO_const_6: LEAF_OO_const_7: movaps 0xFECA(%rdx,%rax,4), %xmm12 #93.5 movaps %xmm14, %xmm13 #93.5 - movslq (%rdi, %rax, 4), %r10 #83.44 + movslq (%r9, %rax, 4), %r10 #83.44 subps %xmm8, %xmm10 #93.5 addps %xmm8, %xmm9 #93.5 addps %xmm11, %xmm2 #93.5 @@ -220,7 +220,7 @@ LEAF_OO_const_7: movaps %xmm2, %xmm9 #93.5 shufps $177, %xmm14, %xmm14 #93.5 movaps %xmm6, %xmm7 #93.5 - movslq 8(%rdi, %rax, 4), %r11 #83.59 + movslq 8(%r9, %rax, 4), %r11 #83.59 addq $4, %rax #92.18 addps %xmm10, %xmm4 #93.5 addps %xmm13, %xmm9 #93.5 @@ -281,9 +281,9 @@ LEAF_EO_const_1: subps %xmm6, %xmm11 #88.5 subps %xmm7, %xmm8 #88.5 addps %xmm7, %xmm9 #88.5 - movslq 8(%rdi, %rax, 4), %r11 #83.59 + movslq 8(%r9, %rax, 4), %r11 #83.59 movaps %xmm10, %xmm2 #88.5 - movslq (%rdi, %rax, 4), %r10 #83.44 + movslq (%r9, %rax, 4), %r10 #83.44 movaps %xmm11, %xmm1 #88.5 shufps $238, %xmm8, %xmm10 #88.5 shufps $238, %xmm9, %xmm11 #88.5 @@ -370,7 +370,7 @@ LEAF_OE_const_0: LEAF_OE_const_1: movaps 0xFECA(%rdx,%rax,4), %xmm7 #70.5 movaps %xmm12, %xmm14 #70.5 - movslq (%rdi, %rax, 4), %r10 #83.44 + movslq (%r9, %rax, 4), %r10 #83.44 addps %xmm8, %xmm9 #70.5 subps %xmm8, %xmm10 #70.5 addps %xmm7, %xmm14 #70.5 @@ -387,7 +387,7 @@ LEAF_OE_const_1: subps 
%xmm9, %xmm14 #70.5 shufps $238, %xmm12, %xmm5 #70.5 addps %xmm10, %xmm12 #70.5 - movslq 8(%rdi, %rax, 4), %r11 #83.59 + movslq 8(%r9, %rax, 4), %r11 #83.59 movlhps %xmm11, %xmm13 #70.5 movaps %xmm13, (%r8,%r10,4) #70.5 movaps 0x30(%rsi), %xmm13 #70.5 @@ -466,7 +466,7 @@ _x_init: x_init: #endif movaps (%rsi), %xmm3 #34.3 - movq 0x20(%rcx), %rdi + movq 0x20(%rcx), %r9 #ifdef __APPLE__ .globl _x4 _x4: @@ -477,10 +477,10 @@ x4: movaps 64(%r8), %xmm0 #34.3 movaps 96(%r8), %xmm1 #34.3 movaps (%r8), %xmm7 #34.3 - movaps (%rdi), %xmm4 #const + movaps (%r9), %xmm4 #const movaps %xmm7, %xmm9 #34.3 movaps %xmm4, %xmm6 #34.3 - movaps 16(%rdi), %xmm2 #const + movaps 16(%r9), %xmm2 #const mulps %xmm0, %xmm6 #34.3 mulps %xmm1, %xmm4 #34.3 shufps $177, %xmm0, %xmm0 #34.3 @@ -505,10 +505,10 @@ x4: movaps %xmm8, 32(%r8) #34.3 movaps %xmm9, 64(%r8) #34.3 movaps %xmm10, 96(%r8) #34.3 - movaps 32(%rdi), %xmm14 #const #34.3 + movaps 32(%r9), %xmm14 #const #34.3 movaps 80(%r8), %xmm11 #34.3 movaps %xmm14, %xmm0 #34.3 - movaps 48(%rdi), %xmm13 #const #34.3 + movaps 48(%r9), %xmm13 #const #34.3 mulps %xmm11, %xmm0 #34.3 mulps %xmm12, %xmm14 #34.3 shufps $177, %xmm11, %xmm11 #34.3 @@ -544,11 +544,11 @@ _x8_soft: .globl x8_soft x8_soft: #endif - # rax, rcx, rdx, r8, r10, r11 (r9 not used) - # rbx, rdi, rsi + # rax, rcx, rdx, r8, r9, r10, r11 + # rbx, rsi # input - movq %rdi, %rax + movq %r9, %rax # output movq %r8, %rcx -- cgit v1.1 From c223f81387a75217fd986dea9326a2ef7f5daa30 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 11 Nov 2014 13:36:38 +0200 Subject: Add x64_movsxd_reg_memindex --- x64/x64-codegen.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index ad53898..f3b3b15 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -480,6 +480,15 @@ typedef union { x64_codegen_post(inst); \ } while (0) +#define x64_movsxd_reg_memindex(inst, reg, basereg, disp, indexreg, shift) \ + do { \ + x64_codegen_pre(inst); \ + x64_emit_rex(inst,8,(reg),0,(basereg)); \ + *(inst)++ = (unsigned char)0x63; \ + x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \ + x64_codegen_post(inst); \ + } while (0) + #define x64_movsxd_reg_reg(inst,dreg,reg) \ do { \ x64_codegen_pre(inst); \ -- cgit v1.1 From 219d8edbde01596d3426915bfb93ea6105a1eae7 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 11 Nov 2014 13:39:07 +0200 Subject: Rename x64_movhlps_reg_reg to x64_sse_movhlps_reg_reg --- x64/x64-codegen.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index f3b3b15..629189b 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -1422,11 +1422,11 @@ typedef union { #define x64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) +#define x64_sse_movlhps_reg_reg(inst,dreg,sreg) \ + emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) -#define x64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) - -#define x64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) - +#define x64_sse_movhlps_reg_reg(inst,dreg,sreg) \ + emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) #define x64_sse_movups_membase_reg(inst, basereg, disp, reg) \ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) -- cgit v1.1 From 36e24f0144c8f44dc282642c962b4d7003e74909 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 11 Nov 2014 13:48:47 +0200 
Subject: generate_leaf_init, generate_leaf_ee, generate_leaf_eo, generate_leaf_oe and generate_leaf_oo Multiply offset constants by 4, and remove the multiply by 4 from the "offset fixing" loops. --- src/codegen.c | 131 ++------------ src/codegen_sse.h | 509 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 521 insertions(+), 119 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 6c6c887..86c7369 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -100,8 +100,8 @@ static void ffts_elaborate_tree(size_t **p, int N, int leaf_N, int offset) transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { - uint32_t offsets[8] = {0, N, N/2, 3*N/2, N/4, 5*N/4, 7*N/4, 3*N/4}; - uint32_t offsets_o[8] = {0, N, N/2, 3*N/2, 7*N/4, 3*N/4, N/4, 5*N/4}; + uint32_t offsets[8] = {0, 4*N, 2*N, 6*N, N, 5*N, 7*N, 3*N}; + uint32_t offsets_o[8] = {0, 4*N, 2*N, 6*N, 7*N, 3*N, N, 5*N}; int32_t pAddr = 0; int32_t pN = 0; @@ -189,128 +189,33 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp += (vfp_o - vfp_e) / 4; #endif #else - /* generate function */ + /* generate functions */ start = generate_prologue(&fp, p); - loop_count = 4 * p->i0; - -#ifdef _M_X64 - /* set loop counter */ - x86_mov_reg_imm(fp, X86_EBX, loop_count); - - /* clear */ - x86_clear_reg(fp, X86_EAX); - - /* set "pointer" to offsets */ - x64_mov_reg_membase(fp, X64_R9, X64_RCX, 0x0, 8); - - /* set "pointer" to constants */ - x64_mov_reg_membase(fp, X64_RSI, X64_RCX, 0xE0, 8); -#else - /* set loop counter */ - x86_mov_reg_imm(fp, X86_ECX, loop_count); - - /* copy function */ - assert((char*) leaf_ee > (char*) leaf_ee_init); - len = (char*) leaf_ee - (char*) leaf_ee_init; - memcpy(fp, leaf_ee_init, (size_t) len); - fp += len; - - ffts_align_mem16(&fp, 9); -#endif - - /* copy function */ - assert((char*) leaf_oo > (char*) leaf_ee); - len = (char*) leaf_oo - (char*) leaf_ee; - memcpy(fp, leaf_ee, (size_t) len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_ee_offsets[i], 4 * offsets[i]); - } - - fp += len; + + loop_count = 4 * p->i0; + generate_leaf_init(&fp, loop_count); + generate_leaf_ee(&fp, offsets); if (ffts_ctzl(N) & 1) { if (p->i1) { loop_count += 4 * p->i1; - - /* align loop/jump destination */ -#ifdef _M_X64 - x86_mov_reg_imm(fp, X86_EBX, loop_count); - ffts_align_mem16(&fp, 3); -#else - x86_mov_reg_imm(fp, X86_ECX, loop_count); - ffts_align_mem16(&fp, 4); -#endif - - /* copy function */ - assert((char*) leaf_eo > (char*) leaf_oo); - len = (char*) leaf_eo - (char*) leaf_oo; - memcpy(fp, leaf_oo, len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_oo_offsets[i], 4 * offsets_o[i]); - } - - fp += len; + generate_leaf_oo(&fp, loop_count, offsets_o); } - loop_count += 4; - - /* copy function */ - assert((char*) leaf_end > (char*) leaf_oe); - len = (char*) leaf_end - (char*) leaf_oe; - memcpy(fp, leaf_oe, len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_oe_offsets[i], 4 * offsets_o[i]); - } - - fp += len; + loop_count += 4; + generate_leaf_oe(&fp, offsets_o); } else { loop_count += 4; - - /* copy function */ - assert((char*) leaf_oe > (char*) leaf_eo); - len = (char*) leaf_oe - (char*) leaf_eo; - memcpy(fp, leaf_eo, len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_eo_offsets[i], 4 * offsets[i]); - } - - fp += len; + generate_leaf_eo(&fp, offsets); if (p->i1) { loop_count += 4 * p->i1; - - /* align loop/jump destination */ -#ifdef 
_M_X64 - x86_mov_reg_imm(fp, X86_EBX, loop_count); - ffts_align_mem16(&fp, 3); -#else - x86_mov_reg_imm(fp, X86_ECX, loop_count); - ffts_align_mem16(&fp, 4); -#endif - - /* copy function */ - assert((char*) leaf_eo > (char*) leaf_oo); - len = (char*) leaf_eo - (char*) leaf_oo; - memcpy(fp, leaf_oo, len); - - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_oo_offsets[i], 4 * offsets_o[i]); - } - - fp += len; + generate_leaf_oo(&fp, loop_count, offsets_o); } } if (p->i1) { - uint32_t offsets_oe[8] = {7*N/4, 3*N/4, N/4, 5*N/4, 0, N, 3*N/2, N/2}; + uint32_t offsets_oe[8] = {7*N, 3*N, N, 5*N, 0, 4*N, 6*N, 2*N}; loop_count += 4 * p->i1; @@ -323,15 +228,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N ffts_align_mem16(&fp, 9); #endif - assert((char*) leaf_oo > (char*) leaf_ee); - len = (char*) leaf_oo - (char*) leaf_ee; - memcpy(fp, leaf_ee, len); - - for (i = 0; i < 8; i++) { - IMM32_NI(fp + sse_leaf_ee_offsets[i], 4 * offsets_oe[i]); - } - - fp += len; + generate_leaf_ee(&fp, offsets_oe); } generate_transform_init(&fp); diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 40bfa3f..20c0f00 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -60,9 +60,9 @@ void sse_constants_inv(); // typedef uint8_t insns_t; extern const uint32_t sse_leaf_ee_offsets[8]; -extern const uint32_t sse_leaf_oo_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; extern const uint32_t sse_leaf_oe_offsets[8]; +extern const uint32_t sse_leaf_oo_offsets[8]; #define P(x) (*(*p)++ = x) @@ -153,7 +153,7 @@ static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) static FFTS_INLINE void ffts_align_mem16(uint8_t **p, uint32_t offset) { - int r = (16 - (offset & 0xf)) - ((uintptr_t)(*p) & 0xf); + int r = (16 - (offset & 0xf)) - (int) ((uintptr_t)(*p) & 0xf); r = (16 + r) & 0xf; ffts_insert_nops(p, r); } @@ -345,6 +345,509 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) return x4_addr; } +static FFTS_INLINE void generate_leaf_init(insns_t **fp, uint32_t loop_count) +{ +#ifndef _M_X64 + size_t len; +#endif + + /* to avoid deferring */ + insns_t *ins = *fp; + +#ifdef _M_X64 + /* set loop counter */ + x86_mov_reg_imm(ins, X86_EBX, loop_count); + + /* generate function */ + + /* clear */ + x86_clear_reg(ins, X86_EAX); + + /* set "pointer" to offsets */ + x64_mov_reg_membase(ins, X64_R9, X64_RCX, 0x0, 8); + + /* set "pointer" to constants */ + x64_mov_reg_membase(ins, X64_RSI, X64_RCX, 0xE0, 8); +#else + /* set loop counter */ + x86_mov_reg_imm(ins, X86_ECX, loop_count); + + /* copy function */ + assert((char*) leaf_ee > (char*) leaf_ee_init); + len = (char*) leaf_ee - (char*) leaf_ee_init; + memcpy(ins, leaf_ee_init, (size_t) len); + ins += len; + + /* align loop/jump destination */ + ffts_align_mem16(&ins, 9); +#endif + + *fp = ins; +} + +static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) +{ +#ifdef _M_X64 + insns_t *leaf_ee_loop; +#else + size_t len; + int i; +#endif + + /* to avoid deferring */ + insns_t *ins = *fp; + +#ifdef _M_X64 + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RSI, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RSI, 0); + + /* beginning of the loop (make sure it's 16 byte aligned) */ + leaf_ee_loop = ins; + assert(!(((uintptr_t) leaf_ee_loop) & 0xF)); + + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + 
x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM3, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM7); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 16); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9); + + /* TODO?? */ + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 16); + + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM3, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4); + x64_sse_mulps_reg_reg(ins, X64_XMM3, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM3); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM3); + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM3, X64_XMM4); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_subps_reg_reg(ins, 
X64_XMM7, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM6); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4); +#else + /* copy function */ + assert((char*) leaf_oo > (char*) leaf_ee); + len = (char*) leaf_oo - (char*) leaf_ee; + memcpy(ins, leaf_ee, (size_t) len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(ins + sse_leaf_ee_offsets[i], offsets[i]); + } + + ins += len; +#endif + + *fp = ins; +} + +static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) +{ +#ifndef _M_X64 + size_t len; + int i; +#endif + + /* to avoid deferring */ + insns_t *ins = *fp; + +#ifdef _M_X64 + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM11); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[6], X64_RAX, 2); + 
x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5); + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15); + x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14); + x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 48); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15); + + /* TODO? */ + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 48); + + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 64); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM12); +#else + /* copy function */ + assert((char*) leaf_oe > (char*) leaf_eo); + len = (char*) leaf_oe - (char*) leaf_eo; + memcpy(ins, leaf_eo, len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(ins + sse_leaf_eo_offsets[i], offsets[i]); + } + + ins += len; +#endif + + *fp = ins; +} + +static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) +{ +#ifndef _M_X64 + size_t len; + int i; +#endif + + /* to avoid deferring */ + insns_t *ins = *fp; + +#ifdef _M_X64 + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RSI, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, 
offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RSI, 48); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 64); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM14); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); + + /* TODO? 
*/ + x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RSI, 48); + + x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM5); + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM0); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM6); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM9, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM3); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM4); +#else + /* copy function */ + assert((char*) leaf_end > (char*) leaf_oe); + len = (char*) leaf_end - (char*) leaf_oe; + memcpy(ins, leaf_oe, len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(ins + sse_leaf_oe_offsets[i], offsets[i]); + } + + ins += len; +#endif + + *fp = ins; +} + +static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets) +{ +#ifdef _M_X64 + insns_t *leaf_oo_loop; +#else + size_t len; + int i; +#endif + + /* to avoid deferring */ + insns_t *ins = *fp; + +#ifdef _M_X64 + /* align loop/jump destination */ + x86_mov_reg_imm(ins, X86_EBX, loop_count); + ffts_align_mem16(&ins, 3); + + x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RSI, 0); + + /* beginning of the loop (make sure it's 16 byte aligned) */ + leaf_oo_loop = ins; + assert(!(((uintptr_t) leaf_oo_loop) & 0xF)); + + 
x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM6); + x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM1); + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM5); + x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM6); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM4); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM15, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM8); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM2); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, 
X64_RBX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4); +#else + /* align loop/jump destination */ + x86_mov_reg_imm(ins, X86_ECX, loop_count); + ffts_align_mem16(&ins, 4); + + /* copy function */ + assert((char*) leaf_eo > (char*) leaf_oo); + len = (char*) leaf_eo - (char*) leaf_oo; + memcpy(ins, leaf_oo, len); + + /* patch offsets */ + for (i = 0; i < 8; i++) { + IMM32_NI(ins + sse_leaf_oo_offsets[i], offsets[i]); + } + + ins += len; +#endif + + *fp = ins; +} + static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) { insns_t *ins; @@ -370,6 +873,8 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) #endif #ifdef _M_X64 + /* generate function */ + /* input */ x64_mov_reg_reg(ins, X64_RAX, X64_R9, 8); -- cgit v1.1 From 332e68112344e53c31d0fd94bbe8d308d9292b16 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 11 Nov 2014 17:56:28 +0200 Subject: Damn AT&T syntax --- src/codegen_sse.h | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index fcab9f3..6cf33bd 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -278,26 +278,26 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_RDI, 16); - x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM6); - x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1); x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM0); - x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM2); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6); - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2); x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM6); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM5); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32); x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112); - x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM9); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM7); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM10); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6); x64_sse_movaps_membase_reg(ins, X64_R8, 0, X64_XMM7); x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8); x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9); @@ -306,27 +306,27 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80); x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RDI, 48); - x64_sse_mulps_reg_reg(ins, 
X64_XMM11, X64_XMM0); - x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12); x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM11); - x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM0); - x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM13); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13); x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM0); - x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16); x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48); x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2); - x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM1); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM2); - x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM4); - x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0); x64_sse_movaps_membase_reg(ins, X64_R8, 16, X64_XMM1); x64_sse_movaps_membase_reg(ins, X64_R8, 48, X64_XMM2); x64_sse_movaps_membase_reg(ins, X64_R8, 80, X64_XMM4); -- cgit v1.1 From e22e42ae5515c7f214744f4ad8e417cd86922f4e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 13 Nov 2014 20:12:58 +0200 Subject: Enable compiler warnings --- CMakeLists.txt | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 548a462..d69d490 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,12 +34,19 @@ option(ENABLE_SHARED add_definitions(-DFFTS_CMAKE_GENERATED) if(MSVC) + # enable all warnings but also disable some.. 
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4 /wd4127") + add_definitions(-D_USE_MATH_DEFINES) -else() +endif(MSVC) + +# GCC specific options +if(CMAKE_COMPILER_IS_GNUCC) include(CheckIncludeFile) include(CheckLibraryExists) - - #set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -pipe -Wall") + + # enable all warnings + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra") # some systems need libm for some of the math functions to work check_library_exists(m pow "" HAVE_LIBM) @@ -47,7 +54,7 @@ else() list(APPEND CMAKE_REQUIRED_LIBRARIES m) list(APPEND FFTS_EXTRA_LIBRARIES m) endif(HAVE_LIBM) -endif(MSVC) +endif(CMAKE_COMPILER_IS_GNUCC) include_directories(src) include_directories(${CMAKE_CURRENT_BINARY_DIR}) -- cgit v1.1 From 00ea542df2d60144fe1ed97d0906430cb4c7b0ae Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 13 Nov 2014 20:13:34 +0200 Subject: Remove unreachable code --- src/ffts.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 9ceb97f..d6a2b15 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -103,6 +103,9 @@ static FFTS_INLINE int ffts_deny_execute(void *start, size_t len) static FFTS_INLINE int ffts_flush_instruction_cache(void *start, size_t length) { +#ifdef _WIN32 + return !FlushInstructionCache(GetCurrentProcess(), start, length); +#else #ifdef __APPLE__ sys_icache_invalidate(start, length); #elif __ANDROID__ @@ -113,10 +116,9 @@ static FFTS_INLINE int ffts_flush_instruction_cache(void *start, size_t length) #elif __GNUC__ __clear_cache((long) start, (long) start + length); #endif -#elif _WIN32 - return !FlushInstructionCache(GetCurrentProcess(), start, length); -#endif return 0; +#endif +#endif } static FFTS_INLINE void *ffts_vmem_alloc(size_t length) -- cgit v1.1 From 8ade1c0e10443228889dd77ad4e25a54ec45635f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 14 Nov 2014 17:56:26 +0200 Subject: Remove x64_sse_movntps_reg_membase, and add x64_sse_movntps_membase_reg/x64_sse_movntps_memindex_reg --- x64/x64-codegen.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 629189b..1be7d80 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -1461,8 +1461,11 @@ typedef union { #define x64_sse_movaps_reg_reg(inst, dreg, reg) \ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) -#define x64_sse_movntps_reg_membase(inst, dreg, basereg, disp) \ - emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b) +#define x64_sse_movntps_membase_reg(inst, basereg, disp, reg) \ + emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x2b) + +#define x64_sse_movntps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \ + emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x2b) #define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) \ emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18) -- cgit v1.1 From db6d95e7d30566d879253a09c7c318975689107d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 14 Nov 2014 18:04:54 +0200 Subject: Take care of unreferenced parameters --- src/codegen_sse.h | 9 +++++++++ src/ffts_small.c | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 7fdb3da..ebad5ae 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -197,6 +197,9 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) { insns_t *start; + + /* unreferenced 
parameter */ + (void) p; /* align call destination */ ffts_align_mem16(fp, 0); @@ -263,6 +266,9 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) size_t len; #endif + /* unreferenced parameter */ + (void) sign; + /* to avoid deferring */ ins = *fp; @@ -858,6 +864,9 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) size_t len; #endif + /* unreferenced parameter */ + (void) sign; + /* to avoid deferring */ ins = *fp; diff --git a/src/ffts_small.c b/src/ffts_small.c index 6f700c6..429991e 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -98,6 +98,9 @@ void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out) data_t *dout = (data_t*) out; cdata_t t0, t1, t2, t3, t4, t5, t6, t7; + /* unreferenced parameter */ + (void) p; + t0[0] = din[0]; t0[1] = din[1]; t1[0] = din[4]; @@ -132,6 +135,9 @@ void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out) data_t *dout = (data_t*) out; cdata_t t0, t1, t2, t3, t4, t5, t6, t7; + /* unreferenced parameter */ + (void) p; + t0[0] = din[0]; t0[1] = din[1]; t1[0] = din[4]; @@ -166,6 +172,9 @@ void ffts_firstpass_2(ffts_plan_t *p, const void *in, void *out) data_t *dout = (data_t*) out; cdata_t t0, t1, r0, r1; + /* unreferenced parameter */ + (void) p; + t0[0] = din[0]; t0[1] = din[1]; t1[0] = din[2]; -- cgit v1.1 From da5fcc7eafbcca83c447efe57838c669d399b45a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 14 Nov 2014 18:37:44 +0200 Subject: Remove unused "neon" labels and mark external functions as "extern" --- src/codegen_sse.h | 31 +++++++++++++------------------ src/sse_win64.s | 12 ------------ 2 files changed, 13 insertions(+), 30 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index ebad5ae..0191b1d 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -40,24 +40,19 @@ #include #include -void neon_x4(float *, size_t, float *); -void neon_x8(float *, size_t, float *); -void neon_x8_t(float *, size_t, float *); -void leaf_ee_init(); -void leaf_ee(); -void leaf_oo(); -void leaf_eo(); -void leaf_oe(); -void leaf_end(); -void x_init(); -void x4(); -void x8_soft(); -void x8_soft_end(); - -void sse_constants(); -void sse_constants_inv(); - -// typedef uint8_t insns_t; +extern void leaf_ee_init(); +extern void leaf_ee(); +extern void leaf_oo(); +extern void leaf_eo(); +extern void leaf_oe(); +extern void leaf_end(); +extern void x_init(); +extern void x4(); +extern void x8_soft(); +extern void x8_soft_end(); + +extern void sse_constants(); +extern void sse_constants_inv(); extern const uint32_t sse_leaf_ee_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; diff --git a/src/sse_win64.s b/src/sse_win64.s index 6b71a2f..193dedd 100644 --- a/src/sse_win64.s +++ b/src/sse_win64.s @@ -33,18 +33,6 @@ .code64 - .globl _neon_x4 - .align 4 -_neon_x4: - - .globl _neon_x8 - .align 4 -_neon_x8: - - .globl _neon_x8_t - .align 4 -_neon_x8_t: - #ifdef __APPLE__ .globl _leaf_ee_init _leaf_ee_init: -- cgit v1.1 From 804a00300bc62594714ffba61bd1f87fb2bc28f6 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:05:24 +0200 Subject: Add some comments to macro assembly --- src/codegen_sse.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 0191b1d..8a03ae4 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -292,7 +292,10 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_subps_reg_reg(ins, X64_XMM6, 
X64_XMM4); x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112); @@ -319,7 +322,10 @@ static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14); x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16); x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48); x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); @@ -413,7 +419,10 @@ static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2); x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); @@ -449,7 +458,10 @@ static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM1); x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM1); x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM3, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM6); x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); @@ -466,7 +478,10 @@ static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM3); x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM3); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); @@ -536,7 +551,10 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); @@ -565,7 +583,10 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8); x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5); x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5); x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14); @@ -595,7 +616,10 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + + /* change sign */ x64_sse_xorps_reg_reg(ins, 
X64_XMM11, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM10); @@ -603,8 +627,8 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM2); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM3); @@ -655,7 +679,10 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14); @@ -696,12 +723,18 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM5); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM0); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM3); x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM6); @@ -790,8 +823,13 @@ static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM9); x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM5); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); @@ -944,6 +982,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) /* load [output + 6 * output_stride] */ x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RCX, 0, X64_RSI, 1); + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); /* load [input + 3 * input_stride] */ @@ -988,7 +1027,10 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12); x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12); x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2); x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9); @@ -1011,7 +1053,10 @@ static FFTS_INLINE 
insns_t* generate_size8_base_case(insns_t **fp, int sign) x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1); x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10); x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11); x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); -- cgit v1.1 From 91c5679d7dd965e58885226e01baed1f843d5870 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:07:48 +0200 Subject: Optionally define SSE constants in header --- src/codegen_sse.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 8a03ae4..6153a49 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -51,8 +51,26 @@ extern void x4(); extern void x8_soft(); extern void x8_soft_end(); +#ifdef SSE_DEFINE_CONSTANTS +static const FFTS_ALIGN(16) unsigned int sse_constants[20] = { + 0x00000000, 0x80000000, 0x00000000, 0x80000000, + 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, + 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, + 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, + 0x00000000, 0x00000000, 0xbf3504f3, 0x3f3504f3 +}; + +static const FFTS_ALIGN(16) unsigned int sse_constants_inv[20] = { + 0x80000000, 0x00000000, 0x80000000, 0x00000000, + 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, + 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, + 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, + 0x00000000, 0x00000000, 0x3f3504f3, 0xbf3504f3 +}; +#else extern void sse_constants(); extern void sse_constants_inv(); +#endif extern const uint32_t sse_leaf_ee_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; -- cgit v1.1 From 869efff02080d23cfea2b4c1aa79fc8d7de5bb44 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:09:21 +0200 Subject: YASM is no longer needed to build for Windows --- CMakeLists.txt | 42 +++++++++++++++++++++++++----------------- README | 14 +++++++++----- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d69d490..8de7302 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,6 +31,10 @@ option(ENABLE_SHARED "Enable building a shared library." OFF ) +option(ENABLE_YASM_COMPILE + "Enables compiling with YASM for Windows." 
OFF +) + add_definitions(-DFFTS_CMAKE_GENERATED) if(MSVC) @@ -85,23 +89,27 @@ if(ENABLE_SSE) ) if(MSVC) - set(CMAKE_ASM-ATT_COMPILER yasm) - enable_language(ASM-ATT) - - add_custom_command( - OUTPUT sse_win64.obj - COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 - -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas - ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Generating sse_win64.obj" - ) - - list(APPEND FFTS_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj - src/sse_win64.s - ) + if(ENABLE_YASM_COMPILE) + set(CMAKE_ASM-ATT_COMPILER yasm) + enable_language(ASM-ATT) + + add_custom_command( + OUTPUT sse_win64.obj + COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 + -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas + ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Generating sse_win64.obj" + ) + + list(APPEND FFTS_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj + src/sse_win64.s + ) + else() + add_definitions(-DSSE_DEFINE_CONSTANTS) + endif(ENABLE_YASM_COMPILE) else() list(APPEND FFTS_SOURCES src/sse.s diff --git a/README b/README index 28224a5..b54ab69 100644 --- a/README +++ b/README @@ -6,16 +6,20 @@ To build for Android, edit and run build_android.sh To build for iOS, edit and run build_iphone.sh To build for Linux or OS X on x86, run -./configure --enable-sse --enable-single --prefix=/usr/local -make -make install + ./configure --enable-sse --enable-single --prefix=/usr/local + make + make install -To build for Windows x64 with MSVC 2005 and YASM v1.3, run +Optionally build for Linux with CMake, run + mkdir build + cmake .. + +To build for Windows x64 with MSVC 2005, run mkdir build cmake .. -G "Visual Studio 8 2005 Win64" Note that 32 bit Windows is not supported at the moment. - + FFTS dynamically generates code at runtime. 
This can be disabled with --disable-dynamic-code -- cgit v1.1 From bf9406c8c9dbd51dfca01d7e97629d293a277e25 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:27:58 +0200 Subject: Define externals only when needed --- src/codegen_sse.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 6153a49..b7d0850 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -40,17 +40,6 @@ #include #include -extern void leaf_ee_init(); -extern void leaf_ee(); -extern void leaf_oo(); -extern void leaf_eo(); -extern void leaf_oe(); -extern void leaf_end(); -extern void x_init(); -extern void x4(); -extern void x8_soft(); -extern void x8_soft_end(); - #ifdef SSE_DEFINE_CONSTANTS static const FFTS_ALIGN(16) unsigned int sse_constants[20] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000, @@ -68,15 +57,27 @@ static const FFTS_ALIGN(16) unsigned int sse_constants_inv[20] = { 0x00000000, 0x00000000, 0x3f3504f3, 0xbf3504f3 }; #else +extern void leaf_ee_init(); +extern void leaf_ee(); +extern void leaf_eo(); +extern void leaf_oe(); +extern void leaf_oo(); +extern void leaf_end(); + extern void sse_constants(); extern void sse_constants_inv(); -#endif extern const uint32_t sse_leaf_ee_offsets[8]; extern const uint32_t sse_leaf_eo_offsets[8]; extern const uint32_t sse_leaf_oe_offsets[8]; extern const uint32_t sse_leaf_oo_offsets[8]; +extern void x_init(); +extern void x4(); +extern void x8_soft(); +extern void x8_soft_end(); +#endif + #define P(x) (*(*p)++ = x) static void IMM32_NI(uint8_t *p, int32_t imm) -- cgit v1.1 From af8d306ec3eb4bac01af536bda2d88743ae2559c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:43:08 +0200 Subject: Disable type cast warning from data pointer to function pointer --- src/codegen.c | 71 ++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 86c7369..0aeacff 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -191,26 +191,26 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N #else /* generate functions */ start = generate_prologue(&fp, p); - - loop_count = 4 * p->i0; - generate_leaf_init(&fp, loop_count); - generate_leaf_ee(&fp, offsets); + + loop_count = 4 * p->i0; + generate_leaf_init(&fp, loop_count); + generate_leaf_ee(&fp, offsets); if (ffts_ctzl(N) & 1) { if (p->i1) { loop_count += 4 * p->i1; - generate_leaf_oo(&fp, loop_count, offsets_o); + generate_leaf_oo(&fp, loop_count, offsets_o); } - loop_count += 4; - generate_leaf_oe(&fp, offsets_o); + loop_count += 4; + generate_leaf_oe(&fp, offsets_o); } else { loop_count += 4; - generate_leaf_eo(&fp, offsets); + generate_leaf_eo(&fp, offsets); if (p->i1) { loop_count += 4 * p->i1; - generate_leaf_oo(&fp, loop_count, offsets_o); + generate_leaf_oo(&fp, loop_count, offsets_o); } } @@ -221,14 +221,14 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 - x86_mov_reg_imm(fp, X86_EBX, loop_count); + x86_mov_reg_imm(fp, X86_EBX, loop_count); ffts_align_mem16(&fp, 8); #else - x86_mov_reg_imm(fp, X86_ECX, loop_count); + x86_mov_reg_imm(fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 9); #endif - generate_leaf_ee(&fp, offsets_oe); + generate_leaf_ee(&fp, offsets_oe); } generate_transform_init(&fp); @@ -240,17 +240,17 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if 
(!pN) { #ifdef _M_X64 - x86_mov_reg_imm(fp, X86_EBX, pps[0]); + x86_mov_reg_imm(fp, X86_EBX, pps[0]); #else - x86_mov_reg_imm(fp, X86_ECX, pps[0] / 4); + x86_mov_reg_imm(fp, X86_ECX, pps[0] / 4); #endif } else { int offset = (4 * pps[1]) - pAddr; if (offset) { #ifdef _M_X64 - x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); #else - x64_alu_reg_imm_size(fp, X86_ADD, X64_RDX, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_RDX, offset, 8); #endif } @@ -258,17 +258,17 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int factor = ffts_ctzl(pps[0]) - ffts_ctzl(pN); #ifdef _M_X64 - if (factor > 0) { - x86_shift_reg_imm(fp, X86_SHL, X86_EBX, factor); - } else { - x86_shift_reg_imm(fp, X86_SHR, X86_EBX, -factor); - } + if (factor > 0) { + x86_shift_reg_imm(fp, X86_SHL, X86_EBX, factor); + } else { + x86_shift_reg_imm(fp, X86_SHR, X86_EBX, -factor); + } #else - if (factor > 0) { - x86_shift_reg_imm(fp, X86_SHL, X86_ECX, factor); - } else { - x86_shift_reg_imm(fp, X86_SHR, X86_ECX, -factor); - } + if (factor > 0) { + x86_shift_reg_imm(fp, X86_SHL, X86_ECX, factor); + } else { + x86_shift_reg_imm(fp, X86_SHR, X86_ECX, -factor); + } #endif } } @@ -278,16 +278,16 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N int offset = (int) (ws_is - pLUT); #ifdef _M_X64 - x64_alu_reg_imm_size(fp, X86_ADD, X64_R9, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R9, offset, 8); #else - x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); + x64_alu_reg_imm_size(fp, X86_ADD, X64_R8, offset, 8); #endif } if (pps[0] == 2 * leaf_N) { - x64_call_code(fp, x_4_addr); + x64_call_code(fp, x_4_addr); } else { - x64_call_code(fp, x_8_addr); + x64_call_code(fp, x_8_addr); } pAddr = 4 * pps[1]; @@ -599,5 +599,16 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N free(ps); +#if defined(_MSC_VER) +#pragma warning(push) + + /* disable type cast warning from data pointer to function pointer */ +#pragma warning(disable : 4055) +#endif + return (transform_func_t) start; + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif } \ No newline at end of file -- cgit v1.1 From bcfd774b9d980de45afa643f1c734f799770a870 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 14:59:46 +0200 Subject: Rename src/ffts.h to src/ffts_internal.h to avoid conflicts with include/ffts.h --- CMakeLists.txt | 2 +- src/codegen.c | 2 +- src/codegen.h | 2 +- src/ffts.c | 2 +- src/ffts.h | 238 ---------------------------------------------------- src/ffts_internal.h | 238 ++++++++++++++++++++++++++++++++++++++++++++++++++++ src/ffts_nd.h | 2 +- src/ffts_real.h | 2 +- src/ffts_real_nd.h | 2 +- src/ffts_small.h | 2 +- 10 files changed, 246 insertions(+), 246 deletions(-) delete mode 100644 src/ffts.h create mode 100644 src/ffts_internal.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 8de7302..4adbb64 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -66,7 +66,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}) set(FFTS_SOURCES src/ffts_attributes.h src/ffts.c - src/ffts.h + src/ffts_internal.h src/ffts_nd.c src/ffts_nd.h src/ffts_real.h diff --git a/src/codegen.c b/src/codegen.c index 0aeacff..e1ed11f 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -33,7 +33,7 @@ #include "codegen.h" #include "macros.h" -#include "ffts.h" +#include "ffts_internal.h" #ifdef __arm__ typedef uint32_t insns_t; diff --git a/src/codegen.h b/src/codegen.h index e3c2381..bb5b60b 100644 --- 
a/src/codegen.h +++ b/src/codegen.h @@ -38,7 +38,7 @@ #pragma once #endif -#include "ffts.h" +#include "ffts_internal.h" transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign); diff --git a/src/ffts.c b/src/ffts.c index d6a2b15..545138b 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -31,7 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "ffts.h" +#include "ffts_internal.h" #include "macros.h" #include "patterns.h" #include "ffts_small.h" diff --git a/src/ffts.h b/src/ffts.h deleted file mode 100644 index a8e27b8..0000000 --- a/src/ffts.h +++ /dev/null @@ -1,238 +0,0 @@ -/* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - -#ifndef __CP_SSE_H__ -#define __CP_SSE_H__ - -//#include "config.h" -//#include "codegen.h" -#include "ffts_attributes.h" -#include "types.h" - -#include -#include -#include -#include -#include -#include - -#define FFTS_PREFIX ffts - -#ifndef FFTS_CAT_PREFIX2 -#define FFTS_CAT_PREFIX2(a,b) a ## b -#endif - -#ifndef FFTS_CAT_PREFIX -#define FFTS_CAT_PREFIX(a,b) FFTS_CAT_PREFIX2(a ## _, b) -#endif - -/* prevent symbol name clashes */ -#ifdef FFTS_PREFIX -#define FUNC_TO_REWRITE FFTS_CAT_PREFIX(FFTS_PREFIX, FUNC_TO_REWRITE) -#endif - -#ifdef __ANDROID__ -#include -#define LOG(s) __android_log_print(ANDROID_LOG_ERROR, "FFTS", s) -#else -#define LOG(s) fprintf(stderr, s) -#endif - -#ifndef M_PI -#define M_PI 3.1415926535897932384626433832795028841971693993751058209 -#endif - -typedef struct _ffts_plan_t ffts_plan_t; - -typedef void (*transform_func_t)(ffts_plan_t *p, const void *in, void *out); - -/** - * Contains all the Information need to perform FFT - * - * - * DO NOT CHANGE THE ORDER OF MEMBERS - * ASSEMBLY CODE USES HARD CODED OFFSETS TO REFERENCE - * SOME OF THESE VARIABES!! 
- */ -struct _ffts_plan_t { - - /** - * - */ - ptrdiff_t *offsets; -#ifdef DYNAMIC_DISABLED - /** - * Twiddle factors - */ - void *ws; - - /** - * ee - 2 size x size8 - * oo - 2 x size4 in parallel - * oe - - */ - void *oe_ws, *eo_ws, *ee_ws; -#else - void FFTS_ALIGN(32) *ws; - void FFTS_ALIGN(32) *oe_ws, *eo_ws, *ee_ws; -#endif - - /** - * Pointer into an array of precomputed indexes for the input data array - */ - ptrdiff_t *is; - - /** - * Twiddle Factor Indexes - */ - size_t *ws_is; - - /** - * Size of the loops for the base cases - */ - size_t i0, i1, n_luts; - - /** - * Size fo the Transform - */ - size_t N; - void *lastlut; - - /** - * Used in multidimensional Code ?? - */ - size_t *transforms; - - /** - * Pointer to the dynamically generated function - * that will execute the FFT - */ - transform_func_t transform; - - /** - * Pointer to the base memory address of - * of the transform function - */ - void *transform_base; - - /** - * Size of the memory block contain the - * generated code - */ - size_t transform_size; - - /** - * Points to the cosnant variables used by - * the Assembly Code - */ - void *constants; - - // multi-dimensional stuff: - struct _ffts_plan_t **plans; - int rank; - size_t *Ns, *Ms; - void *buf; - - void *transpose_buf; - - /** - * Pointer to the destroy function - * to clean up the plan after use - * (differs for real and multi dimension transforms - */ - void (*destroy)(ffts_plan_t *); - - /** - * Coefficiants for the real valued transforms - */ - float *A, *B; - - size_t i2; -}; - -static FFTS_INLINE void *ffts_aligned_malloc(size_t size) -{ -#if defined(_MSC_VER) - return _aligned_malloc(size, 32); -#else - return valloc(size); -#endif -} - -static FFTS_INLINE void ffts_aligned_free(void *p) -{ -#if defined(_MSC_VER) - _aligned_free(p); -#else - free(p); -#endif -} - -#if GCC_VERSION_AT_LEAST(3,3) -#define ffts_ctzl __builtin_ctzl -#elif defined(_MSC_VER) -#include -#ifdef _M_X64 -#pragma intrinsic(_BitScanForward64) -static __inline unsigned long ffts_ctzl(size_t N) -{ - unsigned long count; - _BitScanForward64((unsigned long*) &count, N); - return count; -} -#else -#pragma intrinsic(_BitScanForward) -static __inline unsigned long ffts_ctzl(size_t N) -{ - unsigned long count; - _BitScanForward((unsigned long*) &count, N); - return count; -} -#endif /* _WIN64 */ -#endif /* _MSC_VER */ - -static FFTS_ALWAYS_INLINE float W_re(float N, float k) -{ - return cos(-2.0 * M_PI * k / N); -} - -static FFTS_ALWAYS_INLINE float W_im(float N, float k) -{ - return sin(-2.0 * M_PI * k / N); -} - -void ffts_free(ffts_plan_t *); -void ffts_execute(ffts_plan_t *, const void *, void *); -ffts_plan_t *ffts_init_1d(size_t N, int sign); - -#endif diff --git a/src/ffts_internal.h b/src/ffts_internal.h new file mode 100644 index 0000000..a8e27b8 --- /dev/null +++ b/src/ffts_internal.h @@ -0,0 +1,238 @@ +/* + + This file is part of FFTS -- The Fastest Fourier Transform in the South + + Copyright (c) 2012, Anthony M. Blake + Copyright (c) 2012, The University of Waikato + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef __CP_SSE_H__ +#define __CP_SSE_H__ + +//#include "config.h" +//#include "codegen.h" +#include "ffts_attributes.h" +#include "types.h" + +#include +#include +#include +#include +#include +#include + +#define FFTS_PREFIX ffts + +#ifndef FFTS_CAT_PREFIX2 +#define FFTS_CAT_PREFIX2(a,b) a ## b +#endif + +#ifndef FFTS_CAT_PREFIX +#define FFTS_CAT_PREFIX(a,b) FFTS_CAT_PREFIX2(a ## _, b) +#endif + +/* prevent symbol name clashes */ +#ifdef FFTS_PREFIX +#define FUNC_TO_REWRITE FFTS_CAT_PREFIX(FFTS_PREFIX, FUNC_TO_REWRITE) +#endif + +#ifdef __ANDROID__ +#include +#define LOG(s) __android_log_print(ANDROID_LOG_ERROR, "FFTS", s) +#else +#define LOG(s) fprintf(stderr, s) +#endif + +#ifndef M_PI +#define M_PI 3.1415926535897932384626433832795028841971693993751058209 +#endif + +typedef struct _ffts_plan_t ffts_plan_t; + +typedef void (*transform_func_t)(ffts_plan_t *p, const void *in, void *out); + +/** + * Contains all the information needed to perform the FFT + * + * + * DO NOT CHANGE THE ORDER OF MEMBERS + * ASSEMBLY CODE USES HARD CODED OFFSETS TO REFERENCE + * SOME OF THESE VARIABLES!! + */ +struct _ffts_plan_t { + + /** + * + */ + ptrdiff_t *offsets; +#ifdef DYNAMIC_DISABLED + /** + * Twiddle factors + */ + void *ws; + + /** + * ee - 2 size x size8 + * oo - 2 x size4 in parallel + * oe - + */ + void *oe_ws, *eo_ws, *ee_ws; +#else + void FFTS_ALIGN(32) *ws; + void FFTS_ALIGN(32) *oe_ws, *eo_ws, *ee_ws; +#endif + + /** + * Pointer into an array of precomputed indexes for the input data array + */ + ptrdiff_t *is; + + /** + * Twiddle Factor Indexes + */ + size_t *ws_is; + + /** + * Size of the loops for the base cases + */ + size_t i0, i1, n_luts; + + /** + * Size of the Transform + */ + size_t N; + void *lastlut; + + /** + * Used in multidimensional Code ??
+ */ + size_t *transforms; + + /** + * Pointer to the dynamically generated function + * that will execute the FFT + */ + transform_func_t transform; + + /** + * Pointer to the base memory address + * of the transform function + */ + void *transform_base; + + /** + * Size of the memory block containing the + * generated code + */ + size_t transform_size; + + /** + * Points to the constant variables used by + * the Assembly Code + */ + void *constants; + + // multi-dimensional stuff: + struct _ffts_plan_t **plans; + int rank; + size_t *Ns, *Ms; + void *buf; + + void *transpose_buf; + + /** + * Pointer to the destroy function + * to clean up the plan after use + * (differs for real and multi dimension transforms) + */ + void (*destroy)(ffts_plan_t *); + + /** + * Coefficients for the real valued transforms + */ + float *A, *B; + + size_t i2; +}; + +static FFTS_INLINE void *ffts_aligned_malloc(size_t size) +{ +#if defined(_MSC_VER) + return _aligned_malloc(size, 32); +#else + return valloc(size); +#endif +} + +static FFTS_INLINE void ffts_aligned_free(void *p) +{ +#if defined(_MSC_VER) + _aligned_free(p); +#else + free(p); +#endif +} + +#if GCC_VERSION_AT_LEAST(3,3) +#define ffts_ctzl __builtin_ctzl +#elif defined(_MSC_VER) +#include +#ifdef _M_X64 +#pragma intrinsic(_BitScanForward64) +static __inline unsigned long ffts_ctzl(size_t N) +{ + unsigned long count; + _BitScanForward64((unsigned long*) &count, N); + return count; +} +#else +#pragma intrinsic(_BitScanForward) +static __inline unsigned long ffts_ctzl(size_t N) +{ + unsigned long count; + _BitScanForward((unsigned long*) &count, N); + return count; +} +#endif /* _WIN64 */ +#endif /* _MSC_VER */ + +static FFTS_ALWAYS_INLINE float W_re(float N, float k) +{ + return cos(-2.0 * M_PI * k / N); +} + +static FFTS_ALWAYS_INLINE float W_im(float N, float k) +{ + return sin(-2.0 * M_PI * k / N); +} + +void ffts_free(ffts_plan_t *); +void ffts_execute(ffts_plan_t *, const void *, void *); +ffts_plan_t *ffts_init_1d(size_t N, int sign); + +#endif diff --git a/src/ffts_nd.h b/src/ffts_nd.h index a960cad..dd31dd1 100644 --- a/src/ffts_nd.h +++ b/src/ffts_nd.h @@ -34,7 +34,7 @@ #ifndef FFTS_ND_H #define FFTS_ND_H -#include "ffts.h" +#include "ffts_internal.h" #include ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); diff --git a/src/ffts_real.h b/src/ffts_real.h index 81ca80f..f7f3d6a 100644 --- a/src/ffts_real.h +++ b/src/ffts_real.h @@ -38,7 +38,7 @@ #pragma once #endif -#include "ffts.h" +#include "ffts_internal.h" #include ffts_plan_t *ffts_init_1d_real(size_t N, int sign); diff --git a/src/ffts_real_nd.h b/src/ffts_real_nd.h index d23a002..1ad7026 100644 --- a/src/ffts_real_nd.h +++ b/src/ffts_real_nd.h @@ -38,7 +38,7 @@ #pragma once #endif -#include "ffts.h" +#include "ffts_internal.h" #include ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); diff --git a/src/ffts_small.h b/src/ffts_small.h index 5ae48cc..6632a8a 100644 --- a/src/ffts_small.h +++ b/src/ffts_small.h @@ -1,7 +1,7 @@ #ifndef FFTS_SMALL_H #define FFTS_SMALL_H -#include "ffts.h" +#include "ffts_internal.h" void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out); void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out); -- cgit v1.1 From b77da5c3a3342bbb9ddb2bfe75ea8633016ac2da Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 18:20:51 +0200 Subject: Follow the "one definition rule" --- CMakeLists.txt | 6 ++++++ src/ffts.c | 1 + src/ffts_internal.h | 10 +++------- 3 files changed, 10 insertions(+), 7 deletions(-) diff
--git a/CMakeLists.txt b/CMakeLists.txt index 4adbb64..05b7d2e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,9 +60,14 @@ if(CMAKE_COMPILER_IS_GNUCC) endif(HAVE_LIBM) endif(CMAKE_COMPILER_IS_GNUCC) +include_directories(include) include_directories(src) include_directories(${CMAKE_CURRENT_BINARY_DIR}) +set(FFTS_HEADERS + include/ffts.h +) + set(FFTS_SOURCES src/ffts_attributes.h src/ffts.c @@ -168,6 +173,7 @@ else() endif() add_library(ffts_static + ${FFTS_HEADERS} ${FFTS_SOURCES} ) diff --git a/src/ffts.c b/src/ffts.c index 545138b..ee0102b 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -31,6 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include "ffts.h" #include "ffts_internal.h" #include "macros.h" #include "patterns.h" diff --git a/src/ffts_internal.h b/src/ffts_internal.h index a8e27b8..68a08db 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -31,8 +31,8 @@ */ -#ifndef __CP_SSE_H__ -#define __CP_SSE_H__ +#ifndef FFTS_INTERNAL_H +#define FFTS_INTERNAL_H //#include "config.h" //#include "codegen.h" @@ -231,8 +231,4 @@ static FFTS_ALWAYS_INLINE float W_im(float N, float k) return sin(-2.0 * M_PI * k / N); } -void ffts_free(ffts_plan_t *); -void ffts_execute(ffts_plan_t *, const void *, void *); -ffts_plan_t *ffts_init_1d(size_t N, int sign); - -#endif +#endif /* FFTS_INTERNAL_H */ -- cgit v1.1 From 8d314a602dbc5f354ef9365e3789b6e8cc34b27b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 16 Nov 2014 18:25:28 +0200 Subject: Add forgotten "ffts.h" header --- src/ffts_nd.c | 1 + src/ffts_real_nd.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index f982403..60e78d4 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -32,6 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ffts_nd.h" +#include "ffts.h" #ifdef HAVE_NEON #include "neon.h" diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 05bcc9c..fe9f646 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -33,6 +33,7 @@ #include "ffts_real_nd.h" #include "ffts_real.h" +#include "ffts.h" #ifdef __ARM_NEON__ #include "neon.h" -- cgit v1.1 From b3ff6d450356851f6760883bfd9f501fdcfefa61 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 17 Nov 2014 10:14:06 +0200 Subject: Add comments to SSE constants --- src/codegen_sse.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index b7d0850..739335f 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -42,18 +42,28 @@ #ifdef SSE_DEFINE_CONSTANTS static const FFTS_ALIGN(16) unsigned int sse_constants[20] = { + /* 0.0, -0.0, 0.0, -0.0 */ 0x00000000, 0x80000000, 0x00000000, 0x80000000, + /* 0.707, 0.707, 0.707, 0.707 */ 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, + /* -0.707, 0.707, -0.707, 0.707 */ 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, + /* 1.0, 1.0, 0.707, 0.707 */ 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, + /* 0.0, 0.0, -.707, 0.707 */ 0x00000000, 0x00000000, 0xbf3504f3, 0x3f3504f3 }; static const FFTS_ALIGN(16) unsigned int sse_constants_inv[20] = { + /* -0.0, 0.0, -0.0, 0.0 */ 0x80000000, 0x00000000, 0x80000000, 0x00000000, + /* 0.707, 0.707, 0.707, 0.707 */ 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, + /* 0.707, -0.707, 0.707, -0.707 */ 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, + /* 1.0, 1.0, 0.707, 0.707 */ 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, + /* 0.0, 0.0, 0.707, -0.707 */ 0x00000000, 0x00000000, 0x3f3504f3, 0xbf3504f3 }; #else -- cgit v1.1 From 49c443ba7bc74893422e6f7cee9e569edfef1393 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 17 Nov 2014 12:07:10 +0200 Subject: Added x64_sse_addps_reg_reg_size, x64_sse_mulps_reg_reg_size, x64_sse_subps_reg_reg_size, x64_sse_movhlps_reg_reg_size, x64_sse_movlhps_reg_reg_size and x64_sse_movaps_reg_reg_size --- x64/x64-codegen.h | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/x64/x64-codegen.h b/x64/x64-codegen.h index 1be7d80..02b9907 100644 --- a/x64/x64-codegen.h +++ b/x64/x64-codegen.h @@ -1123,13 +1123,25 @@ typedef union { #define x64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size)) -#define x64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) +#define x64_sse_addps_reg_reg(inst, dreg, reg) \ + emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58) + +#define x64_sse_addps_reg_reg_size(inst, dreg, reg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x58, size) #define x64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e) -#define x64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) +#define x64_sse_mulps_reg_reg(inst, dreg, reg) \ + emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59) + +#define x64_sse_mulps_reg_reg_size(inst, dreg, reg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x59, size) -#define x64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) +#define x64_sse_subps_reg_reg(inst, dreg, reg) \ + emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c) + +#define x64_sse_subps_reg_reg_size(inst, dreg, reg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), 
(reg), 0x0f, 0x5c, size) #define x64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f) @@ -1422,11 +1434,17 @@ typedef union { #define x64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e) -#define x64_sse_movlhps_reg_reg(inst,dreg,sreg) \ +#define x64_sse_movhlps_reg_reg(inst, dreg, sreg) \ + emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) + +#define x64_sse_movhlps_reg_reg_size(inst, dreg, sreg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x12, size) + +#define x64_sse_movlhps_reg_reg(inst, dreg, sreg) \ emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16) -#define x64_sse_movhlps_reg_reg(inst,dreg,sreg) \ - emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12) +#define x64_sse_movlhps_reg_reg_size(inst, dreg, sreg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x16, size) #define x64_sse_movups_membase_reg(inst, basereg, disp, reg) \ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11) @@ -1461,6 +1479,9 @@ typedef union { #define x64_sse_movaps_reg_reg(inst, dreg, reg) \ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28) +#define x64_sse_movaps_reg_reg_size(inst, dreg, reg, size) \ + emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x28, size) + #define x64_sse_movntps_membase_reg(inst, basereg, disp, reg) \ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x2b) -- cgit v1.1 From d77be00aaadd7772b364c10bff41a38e0112f59d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 17 Nov 2014 12:15:41 +0200 Subject: Don't use long NOPs, instead add extra prefix to extend op codes to align branch targets --- src/codegen.c | 12 ++-- src/codegen_sse.h | 203 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 126 insertions(+), 89 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index e1ed11f..9f2921a 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -194,23 +194,26 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N loop_count = 4 * p->i0; generate_leaf_init(&fp, loop_count); - generate_leaf_ee(&fp, offsets); if (ffts_ctzl(N) & 1) { + generate_leaf_ee(&fp, offsets, p->i1 ? 6 : 0); + if (p->i1) { loop_count += 4 * p->i1; - generate_leaf_oo(&fp, loop_count, offsets_o); + generate_leaf_oo(&fp, loop_count, offsets_o, 7); } loop_count += 4; generate_leaf_oe(&fp, offsets_o); } else { + generate_leaf_ee(&fp, offsets, N >= 256 ? 2 : 8); + loop_count += 4; generate_leaf_eo(&fp, offsets); if (p->i1) { loop_count += 4 * p->i1; - generate_leaf_oo(&fp, loop_count, offsets_o); + generate_leaf_oo(&fp, loop_count, offsets_o, N >= 256 ? 
4 : 7); } } @@ -222,13 +225,12 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N /* align loop/jump destination */ #ifdef _M_X64 x86_mov_reg_imm(fp, X86_EBX, loop_count); - ffts_align_mem16(&fp, 8); #else x86_mov_reg_imm(fp, X86_ECX, loop_count); ffts_align_mem16(&fp, 9); #endif - generate_leaf_ee(&fp, offsets_oe); + generate_leaf_ee(&fp, offsets_oe, 0); } generate_transform_init(&fp); diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 739335f..558a015 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -404,6 +404,9 @@ static FFTS_INLINE void generate_leaf_init(insns_t **fp, uint32_t loop_count) /* set "pointer" to constants */ x64_mov_reg_membase(ins, X64_RSI, X64_RCX, 0xE0, 8); + + /* use XMM3 for sign change */ + x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); #else /* set loop counter */ x86_mov_reg_imm(ins, X86_ECX, loop_count); @@ -421,7 +424,7 @@ static FFTS_INLINE void generate_leaf_init(insns_t **fp, uint32_t loop_count) *fp = ins; } -static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) +static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets, int extend) { #ifdef _M_X64 insns_t *leaf_ee_loop; @@ -434,39 +437,47 @@ static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RSI, 32); - x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RSI, 0); - + x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_RSI, 32, 1); + /* beginning of the loop (make sure it's 16 byte aligned) */ leaf_ee_loop = ins; assert(!(((uintptr_t) leaf_ee_loop) & 0xF)); x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[0], X64_RAX, 2); x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[2], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[3], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); - + /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); - + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2); x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9); x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[5], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); - x64_sse_movaps_reg_memindex(ins, X64_XMM3, X64_RDX, offsets[6], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[6], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[7], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM8); x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM7); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM14); + x64_sse_subps_reg_reg(ins, X64_XMM8, 
X64_XMM14); x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12); @@ -475,60 +486,74 @@ static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 16); x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); - /* TODO?? */ - x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 16); + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0); + extend--; - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10); x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); - x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM3); - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM1); - x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM1); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM8); + + x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0); + extend--; + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); - + /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM8); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM3, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM6); + x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0); + extend--; + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4); - x64_sse_mulps_reg_reg(ins, X64_XMM3, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM0); x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM3); - x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM12); x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7); x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1); x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM8, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM3); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM3); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM8); /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); - x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); - x64_sse_movlhps_reg_reg(ins, X64_XMM3, X64_XMM4); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0); + extend--; + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12); x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7); 
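/*
 * The *_size(..., extend > 0) calls above are the heart of this commit:
 * rather than padding with long NOPs (which is why the ffts_align_mem16()
 * calls are dropped from the _M_X64 paths), the emitter prepends one
 * redundant prefix byte to an instruction while the extend budget is still
 * positive, growing the code a byte at a time until the next loop head
 * lands on a 16-byte boundary. A minimal sketch of such an emitter,
 * assuming the prefix is a bare REX byte (0x40), which reg-to-reg SSE moves
 * on the low registers simply ignore -- this helper is illustrative, not
 * the project's actual macro:
 *
 *     static unsigned char *emit_movaps_reg_reg(unsigned char *p,
 *                                               int dreg, int sreg, int pad)
 *     {
 *         if (pad)
 *             *p++ = 0x40;  // bare REX: adds one byte, no semantic effect
 *         *p++ = 0x0F;      // MOVAPS xmm(dreg), xmm(sreg) encodes as 0F 28 /r
 *         *p++ = 0x28;
 *         *p++ = (unsigned char)(0xC0 | (dreg << 3) | sreg);  // ModRM, reg-reg
 *         return p;
 *     }
 */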
x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE); - x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM5); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0); + extend--; + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13); x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14); x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE); x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM8); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM4); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM7); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM9); @@ -578,7 +603,6 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); - x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); /* change sign */ @@ -627,12 +651,9 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 48); x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11); x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7); x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15); - - /* TODO? */ - x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 48); - x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 64); x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); @@ -649,9 +670,9 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) /* change sign */ x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); - x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM10); x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10); x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); @@ -660,7 +681,7 @@ static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM2); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM0); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM12); #else /* copy function */ @@ -690,7 +711,6 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RSI, 0); x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RDX, offsets[2], X64_RAX, 2); x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6); @@ -710,7 +730,7 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); 
x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); @@ -725,66 +745,63 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM13); x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RSI, 48); x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13); x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 64); x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5); x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1); x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM14); x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); - - /* TODO? */ - x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RSI, 48); - - x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1); x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12); x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[4], X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4); x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[6], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM9); x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RDX, offsets[7], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[5], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13); x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2); - x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM15); x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM5); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1); /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM0); + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM0); x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM0); + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); - x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6); x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM0); x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM9, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM9, 0xEE); x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4); x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM3); - x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM0); + 
x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM13); x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13); x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8); x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM10); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM11); x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM0); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM14); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM4); #else @@ -804,7 +821,7 @@ static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) *fp = ins; } -static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets) +static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets, int extend) { #ifdef _M_X64 insns_t *leaf_oo_loop; @@ -819,26 +836,37 @@ static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint #ifdef _M_X64 /* align loop/jump destination */ x86_mov_reg_imm(ins, X86_EBX, loop_count); - ffts_align_mem16(&ins, 3); - x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RSI, 0); - /* beginning of the loop (make sure it's 16 byte aligned) */ leaf_oo_loop = ins; assert(!(((uintptr_t) leaf_oo_loop) & 0xF)); x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[0], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[2], X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM7); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM7); + + x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RDX, offsets[4], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM6); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RDX, offsets[5], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[6], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4); x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[7], X64_RAX, 2); @@ -850,19 +878,23 @@ static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12); x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM9); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); - + /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM5); + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); + + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM5); + x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM3); 
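/*
 * Throughout these rewritten kernels XMM3 is dedicated to the sign-change
 * mask: generate_leaf_init() loads it once from the first row of the
 * constants block ({0.0, -0.0, 0.0, -0.0}), so each leaf no longer reloads
 * the mask from [RSI] into a per-kernel scratch register (formerly XMM8,
 * XMM5 or XMM0, depending on the kernel), and temporaries that previously
 * used XMM3 are renamed out of the way. In intrinsics terms the recurring
 * idiom is roughly the following sketch (variable names illustrative):
 *
 *     __m128 sign_mask = _mm_load_ps((const float *) sse_constants); // once
 *     x = _mm_xor_ps(x, sign_mask);  // flips the sign of lanes 1 and 3
 */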
x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM6); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM6, extend > 0); + extend--; + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10); @@ -871,23 +903,26 @@ static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10); x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1); x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); - x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM4); + + x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0); + extend--; + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14); x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14); x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM5); x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15); x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13); x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM15, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM15, 0xEE); x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE); x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE); x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM14); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM7); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM4); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM8); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM5); x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM6); x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM9); x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM2); -- cgit v1.1 From 71f1f4dae77c2f6b335c3e06c13a3ecedf73ccda Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 17 Nov 2014 15:39:46 +0200 Subject: Fix redefinition of ffts_plan_t --- CMakeLists.txt | 1 + src/codegen.c | 1 - src/codegen.h | 1 + src/ffts_internal.h | 2 -- src/ffts_nd.c | 2 +- src/ffts_nd.h | 2 +- src/ffts_real.c | 1 + src/ffts_real.h | 2 +- src/ffts_real_nd.c | 2 +- src/ffts_real_nd.h | 2 +- src/ffts_small.c | 1 + src/ffts_small.h | 2 +- 12 files changed, 10 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 05b7d2e..74847fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -79,6 +79,7 @@ set(FFTS_SOURCES src/ffts_real_nd.c src/ffts_real_nd.h src/ffts_small.c + src/ffts_small.h src/macros.h src/patterns.c src/patterns.h diff --git a/src/codegen.c b/src/codegen.c index 9f2921a..6180f94 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -33,7 +33,6 @@ #include "codegen.h" #include "macros.h" -#include "ffts_internal.h" #ifdef __arm__ typedef uint32_t insns_t; diff --git a/src/codegen.h b/src/codegen.h index bb5b60b..e170ca7 100644 --- a/src/codegen.h +++ b/src/codegen.h @@ -38,6 +38,7 @@ #pragma once #endif +#include "ffts.h" #include "ffts_internal.h" transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N, int sign); diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 
68a08db..413bb51 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -72,8 +72,6 @@ #define M_PI 3.1415926535897932384626433832795028841971693993751058209 #endif -typedef struct _ffts_plan_t ffts_plan_t; - typedef void (*transform_func_t)(ffts_plan_t *p, const void *in, void *out); /** diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 60e78d4..3651d1f 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -32,7 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "ffts_nd.h" -#include "ffts.h" +#include "ffts_internal.h" #ifdef HAVE_NEON #include "neon.h" diff --git a/src/ffts_nd.h b/src/ffts_nd.h index dd31dd1..a960cad 100644 --- a/src/ffts_nd.h +++ b/src/ffts_nd.h @@ -34,7 +34,7 @@ #ifndef FFTS_ND_H #define FFTS_ND_H -#include "ffts_internal.h" +#include "ffts.h" #include ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); diff --git a/src/ffts_real.c b/src/ffts_real.c index 77c57a0..c2f03b7 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -32,6 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "ffts_real.h" +#include "ffts_internal.h" #ifdef HAVE_NEON #include diff --git a/src/ffts_real.h b/src/ffts_real.h index f7f3d6a..81ca80f 100644 --- a/src/ffts_real.h +++ b/src/ffts_real.h @@ -38,7 +38,7 @@ #pragma once #endif -#include "ffts_internal.h" +#include "ffts.h" #include ffts_plan_t *ffts_init_1d_real(size_t N, int sign); diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index fe9f646..8b66333 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -33,7 +33,7 @@ #include "ffts_real_nd.h" #include "ffts_real.h" -#include "ffts.h" +#include "ffts_internal.h" #ifdef __ARM_NEON__ #include "neon.h" diff --git a/src/ffts_real_nd.h b/src/ffts_real_nd.h index 1ad7026..d23a002 100644 --- a/src/ffts_real_nd.h +++ b/src/ffts_real_nd.h @@ -38,7 +38,7 @@ #pragma once #endif -#include "ffts_internal.h" +#include "ffts.h" #include ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); diff --git a/src/ffts_small.c b/src/ffts_small.c index 429991e..8fa373f 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -32,6 +32,7 @@ */ #include "ffts_small.h" +#include "ffts_internal.h" #include "macros.h" #include diff --git a/src/ffts_small.h b/src/ffts_small.h index 6632a8a..5ae48cc 100644 --- a/src/ffts_small.h +++ b/src/ffts_small.h @@ -1,7 +1,7 @@ #ifndef FFTS_SMALL_H #define FFTS_SMALL_H -#include "ffts_internal.h" +#include "ffts.h" void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out); void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out); -- cgit v1.1 From d896a265d5c75a15333b0998cb7eaa465ecb8419 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 20 Nov 2014 16:54:24 +0200 Subject: Use _mm_movelh_ps instead of _mm_shuffle_ps to implement VUNPACKLO --- src/macros-sse.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/macros-sse.h b/src/macros-sse.h index 85cd02d..cab822c 100644 --- a/src/macros-sse.h +++ b/src/macros-sse.h @@ -43,7 +43,7 @@ typedef __m128 V; #define VADD _mm_add_ps #define VSUB _mm_sub_ps #define VMUL _mm_mul_ps -//#define VLIT4 _mm_set_ps +#define VLIT4 _mm_set_ps #define VXOR _mm_xor_ps #define VST _mm_store_ps #define VLD _mm_load_ps @@ -51,12 +51,10 @@ typedef __m128 V; #define VSWAPPAIRS(x) (_mm_shuffle_ps(x,x,_MM_SHUFFLE(2,3,0,1))) #define VUNPACKHI(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,3,2))) -#define VUNPACKLO(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(1,0,1,0))) +#define VUNPACKLO(x,y) (_mm_movelh_ps(x,y)) #define VBLEND(x,y) 
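/* (note on the VUNPACKLO change just above: _mm_movelh_ps(x, y) yields
 * {x0, x1, y0, y1}, exactly what _mm_shuffle_ps(x, y, _MM_SHUFFLE(1,0,1,0))
 * produced, but it maps to the 3-byte MOVLHPS encoding instead of SHUFPS
 * plus an immediate byte; call sites such as V lo = VUNPACKLO(a, b); are
 * unchanged) */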
(_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,1,0))) -#define VLIT4 _mm_set_ps - #define VDUPRE(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(2,2,0,0))) #define VDUPIM(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(3,3,1,1))) -- cgit v1.1 From 8b69bdcb5fbc1f26e95794d5cfecd456dcc76c1d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 20 Nov 2014 17:07:02 +0200 Subject: Include system specific macros in macros.h --- src/codegen.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/codegen.c b/src/codegen.c index 6180f94..1556d63 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -48,7 +48,6 @@ typedef uint8_t insns_t; #include "vfp.h" #else #include "codegen_sse.h" -#include "macros-sse.h" #endif #include -- cgit v1.1 From d721afb330499d882d74349780c70e6cc9a4a8f9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 20 Nov 2014 17:12:24 +0200 Subject: Add static transform to library --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 74847fb..85d3108 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,6 +80,8 @@ set(FFTS_SOURCES src/ffts_real_nd.h src/ffts_small.c src/ffts_small.h + src/ffts_static.c + src/ffts_static.h src/macros.h src/patterns.c src/patterns.h -- cgit v1.1 From f051eee4ed371c9bf986833f574ec7789d0c71bf Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 20 Nov 2014 19:25:18 +0200 Subject: Enable static first pass and "unroll" recursion to base cases; N <= 128 Temporarily break support for static neon. --- src/ffts_static.c | 514 +++++++++++++++++++++++++++++++++++++++++++++++------- src/ffts_static.h | 20 ++- 2 files changed, 464 insertions(+), 70 deletions(-) diff --git a/src/ffts_static.c b/src/ffts_static.c index dda5f51..cdecf1b 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. Redistribution and use in source and binary forms, with or without @@ -30,73 +30,465 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ + #include "ffts_static.h" +#include "ffts_internal.h" +#include "macros.h" + +#include + +static const FFTS_ALIGN(16) data_t ffts_constants[16] = { + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + -0.70710678118654746171500846685376, 0.70710678118654746171500846685376, + -0.70710678118654746171500846685376, 0.70710678118654746171500846685376, + 1.0, 1.0, + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + 0.0, 0.0, + -0.70710678118654746171500846685376, 0.70710678118654746171500846685376 +}; + +static const FFTS_ALIGN(16) data_t ffts_constants_inv[16] = { + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + 0.70710678118654746171500846685376, -0.70710678118654746171500846685376, + 0.70710678118654746171500846685376, -0.70710678118654746171500846685376, + 1.0, 1.0, + 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, + 0.0, 0.0, + 0.70710678118654746171500846685376, -0.70710678118654746171500846685376 +}; + +static FFTS_INLINE void K_0(int inv, V *r0, V *r1, V *r2, V *r3) +{ + V t0, t1, t2, t3; + + t0 = *r0; + t1 = *r1; + + t2 = VADD(*r2, *r3); + t3 = IMULI(inv, VSUB(*r2, *r3)); + + *r0 = VADD(t0, t2); + *r2 = VSUB(t0, t2); + *r1 = VSUB(t1, t3); + *r3 = VADD(t1, t3); +} + +static FFTS_INLINE void L_2(const data_t *FFTS_RESTRICT i0, + const data_t *FFTS_RESTRICT i1, + const data_t *FFTS_RESTRICT i2, + const data_t *FFTS_RESTRICT i3, + V *r0, + V *r1, + V *r2, + V *r3) +{ + V t0, t1, t2, t3; + + t0 = VLD(i0); + t1 = VLD(i1); + t2 = VLD(i2); + t3 = VLD(i3); + + *r0 = VADD(t0, t1); + *r1 = VSUB(t0, t1); + *r2 = VADD(t2, t3); + *r3 = VSUB(t2, t3); +} + +static FFTS_INLINE void L_4(int inv, + const data_t *FFTS_RESTRICT i0, + const data_t *FFTS_RESTRICT i1, + const data_t *FFTS_RESTRICT i2, + const data_t *FFTS_RESTRICT i3, + V *r0, + V *r1, + V *r2, + V *r3) +{ + V t0, t1, t2, t3, t4, t5, t6, t7; + + t0 = VLD(i0); + t1 = VLD(i1); + t2 = VLD(i2); + t3 = VLD(i3); + + t4 = VADD(t0, t1); + t5 = VSUB(t0, t1); + t6 = VADD(t2, t3); + t7 = IMULI(inv, VSUB(t2, t3)); + + *r0 = VADD(t4, t6); + *r2 = VSUB(t4, t6); + *r1 = VSUB(t5, t7); + *r3 = VADD(t5, t7); +} + +static FFTS_INLINE void LEAF_EE(data_t *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const data_t *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) +{ + const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + + V r0, r1, r2, r3, r4, r5, r6, r7; + + data_t *out0 = out + os[0]; + data_t *out1 = out + os[1]; + + L_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + L_2(in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); + + K_0(inv, &r0, &r2, &r4, &r6); + K_N(inv, VLD(LUT + 0), VLD(LUT + 4), &r1, &r3, &r5, &r7); + TX2(&r0, &r1); + TX2(&r2, &r3); + TX2(&r4, &r5); + TX2(&r6, &r7); + + S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); +} + +static FFTS_INLINE void LEAF_EE2(data_t *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const data_t *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) +{ + const data_t *FFTS_RESTRICT LUT = inv ? 
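/* (the forward and inverse tables differ only in the sign of the 0.7071
 * entries: the inverse path uses the complex-conjugate twiddle factors,
 * e^{+2*pi*i*k/N} rather than e^{-2*pi*i*k/N}, matching W_re/W_im in
 * ffts_internal.h) */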
ffts_constants_inv : ffts_constants; + + V r0, r1, r2, r3, r4, r5, r6, r7; + + data_t *out0 = out + os[0]; + data_t *out1 = out + os[1]; + + L_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r0, &r1, &r2, &r3); + L_2(in + is[0], in + is[1], in + is[3], in + is[2], &r4, &r5, &r6, &r7); + + K_0(inv, &r0, &r2, &r4, &r6); + K_N(inv, VLD(LUT + 0), VLD(LUT + 4), &r1, &r3, &r5, &r7); + TX2(&r0, &r1); + TX2(&r2, &r3); + TX2(&r4, &r5); + TX2(&r6, &r7); + + S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); +} + +static FFTS_INLINE void LEAF_EO(data_t *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const data_t *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) +{ + const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + + V r0, r1, r2, r3, r4, r5, r6, r7; + + data_t *out0 = out + os[0]; + data_t *out1 = out + os[1]; + + L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + L_2_4(inv, in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); + + S_4(r2, r3, r7, r6, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r0, &r1, &r4, &r5); + S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); +} + +static FFTS_INLINE void LEAF_OE(data_t *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const data_t *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) +{ + const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + + V r0, r1, r2, r3, r4, r5, r6, r7; + + data_t *out0 = out + os[0]; + data_t *out1 = out + os[1]; + + L_4_2(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); + + S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r6, &r7, &r2, &r3); + S_4(r6, r7, r2, r3, out1 + 0, out1 + 4, out1 + 8, out1 + 12); +} + +static FFTS_INLINE void LEAF_OO(data_t *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const data_t *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) +{ + V r0, r1, r2, r3, r4, r5, r6, r7; + + data_t *out0 = out + os[0]; + data_t *out1 = out + os[1]; -void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) { - if(N > 16) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; - float *ws = ((float *)(p->ws)) + (p->ws_is[__builtin_ctzl(N)-4] << 1); - - ffts_static_rec_i(p, data, N2); - ffts_static_rec_i(p, data + N1, N3); - ffts_static_rec_i(p, data + N1 + N2, N3); - ffts_static_rec_i(p, data + N, N2); - ffts_static_rec_i(p, data + N + N1, N2); - - if(N == p->N) { - neon_static_x8_t_i(data, N, ws); - }else{ - neon_static_x8_i(data, N, ws); - } - - }else if(N==16){ - neon_static_x4_i(data, N, p->ws); - } + L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); + S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + S_4(r2, r3, r6, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); } -void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) { - if(N > 16) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; - float *ws = ((float *)(p->ws)) + (p->ws_is[__builtin_ctzl(N)-4] << 1); - - ffts_static_rec_f(p, data, N2); - ffts_static_rec_f(p, data + N1, N3); - ffts_static_rec_f(p, data + N1 + N2, N3); - 
ffts_static_rec_f(p, data + N, N2); - ffts_static_rec_f(p, data + N + N1, N2); - - if(N == p->N) { - neon_static_x8_t_f(data, N, ws); - }else{ - neon_static_x8_f(data, N, ws); - } - - }else if(N==16){ - neon_static_x4_f(data, N, p->ws); - } +static FFTS_INLINE void X_4(int inv, + data_t *FFTS_RESTRICT data, + size_t N, + const data_t *FFTS_RESTRICT LUT) +{ + size_t i; + + for (i = 0; i < N/8; i++) { + V r0 = VLD(data); + V r1 = VLD(data + 2*N/4); + V r2 = VLD(data + 4*N/4); + V r3 = VLD(data + 6*N/4); + + K_N(inv, VLD(LUT), VLD(LUT + 4), &r0, &r1, &r2, &r3); + + VST(data , r0); + VST(data + 2*N/4, r1); + VST(data + 4*N/4, r2); + VST(data + 6*N/4, r3); + + LUT += 8; + data += 4; + } } -void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out) { +static FFTS_INLINE void X_8(int inv, + data_t *FFTS_RESTRICT data0, + size_t N, + const data_t *FFTS_RESTRICT LUT) +{ + data_t *data1 = data0 + 1*N/4; + data_t *data2 = data0 + 2*N/4; + data_t *data3 = data0 + 3*N/4; + data_t *data4 = data0 + 4*N/4; + data_t *data5 = data0 + 5*N/4; + data_t *data6 = data0 + 6*N/4; + data_t *data7 = data0 + 7*N/4; + size_t i; + + for (i = 0; i < N/16; i++) { + V r0, r1, r2, r3, r4, r5, r6, r7; + + r0 = VLD(data0); + r1 = VLD(data1); + r2 = VLD(data2); + r3 = VLD(data3); + + K_N(inv, VLD(LUT), VLD(LUT + 4), &r0, &r1, &r2, &r3); + r4 = VLD(data4); + r6 = VLD(data6); + + K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r0, &r2, &r4, &r6); + r5 = VLD(data5); + r7 = VLD(data7); + + K_N(inv, VLD(LUT + 16), VLD(LUT + 20), &r1, &r3, &r5, &r7); + LUT += 24; + + VST(data0, r0); + data0 += 4; + + VST(data1, r1); + data1 += 4; + + VST(data2, r2); + data2 += 4; + + VST(data3, r3); + data3 += 4; - if(__builtin_ctzl(p->N) & 1) - neon_static_o_f(p, in, out); - else - neon_static_e_f(p, in, out); - ffts_static_rec_f(p, out, p->N); + VST(data4, r4); + data4 += 4; + + VST(data5, r5); + data5 += 4; + + VST(data6, r6); + data6 += 4; + + VST(data7, r7); + data7 += 4; + } +} + +static FFTS_INLINE void ffts_static_firstpass_odd(float *const FFTS_RESTRICT out, + const float *FFTS_RESTRICT in, + const ffts_plan_t *FFTS_RESTRICT p, + int inv) +{ + size_t i, i0 = p->i0, i1 = p->i1; + const ptrdiff_t *is = (const ptrdiff_t*) p->is; + const ptrdiff_t *os = (const ptrdiff_t*) p->offsets; + + for (i = i0; i > 0; --i) { + LEAF_EE(out, os, in, is, inv); + in += 4; + os += 2; + } + + for (i = i1; i > 0; --i) { + LEAF_OO(out, os, in, is, inv); + in += 4; + os += 2; + } + + LEAF_OE(out, os, in, is, inv); + in += 4; + os += 2; + + for (i = i1; i > 0; --i) { + LEAF_EE2(out, os, in, is, inv); + in += 4; + os += 2; + } } +static FFTS_INLINE void ffts_static_firstpass_even(float *FFTS_RESTRICT out, + const float *FFTS_RESTRICT in, + const ffts_plan_t *FFTS_RESTRICT p, + int inv) +{ + size_t i, i0 = p->i0, i1 = p->i1; + const ptrdiff_t *is = (const ptrdiff_t*) p->is; + const ptrdiff_t *os = (const ptrdiff_t*) p->offsets; -void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out) { + for(i = i0; i > 0; --i) { + LEAF_EE(out, os, in, is, inv); + in += 4; + os += 2; + } - if(__builtin_ctzl(p->N) & 1) - neon_static_o_i(p, in, out); - else - neon_static_e_i(p, in, out); - ffts_static_rec_i(p, out, p->N); + LEAF_EO(out, os, in, is, inv); + in += 4; + os += 2; + + for (i = i1; i > 0; --i) { + LEAF_OO(out, os, in, is, inv); + in += 4; + os += 2; + } + + for (i = i1; i > 0; --i) { + LEAF_EE2(out, os, in, is, inv); + in += 4; + os += 2; + } } -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: + +void ffts_static_rec_f(ffts_plan_t *p, float 
*data, size_t N) +{ + const float *ws = (float*) p->ws; + + if (N > 128) { + size_t N1 = N >> 1; + size_t N2 = N >> 2; + size_t N3 = N >> 3; + + ffts_static_rec_f(p, data , N2); + ffts_static_rec_f(p, data + N1 , N3); + ffts_static_rec_f(p, data + N1 + N2, N3); + ffts_static_rec_f(p, data + N , N2); + ffts_static_rec_f(p, data + N + N1 , N2); + + X_8(0, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + X_8(0, data + 0, 32, ws1); + + X_4(0, data + 64, 16, ws); + X_4(0, data + 96, 16, ws); + + X_8(0, data + 128, 32, ws1); + X_8(0, data + 192, 32, ws1); + + X_8(0, data, N, ws + (p->ws_is[3] << 1)); + } else if (N == 64) { + X_4(0, data + 0, 16, ws); + X_4(0, data + 64, 16, ws); + X_4(0, data + 96, 16, ws); + + X_8(0, data + 0, N, ws + (p->ws_is[2] << 1)); + } else if (N == 32) { + X_8(0, data, N, ws + (p->ws_is[1] << 1)); + } else { + assert(N == 16); + X_4(0, data, N, ws); + } +} + +void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) +{ + float *ws = (float*) p->ws; + + if (N > 128) { + size_t N1 = N >> 1; + size_t N2 = N >> 2; + size_t N3 = N >> 3; + + ffts_static_rec_i(p, data , N2); + ffts_static_rec_i(p, data + N1 , N3); + ffts_static_rec_i(p, data + N1 + N2, N3); + ffts_static_rec_i(p, data + N , N2); + ffts_static_rec_i(p, data + N + N1 , N2); + + X_8(1, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + X_8(1, data + 0, 32, ws1); + + X_4(1, data + 64, 16, ws); + X_4(1, data + 96, 16, ws); + + X_8(1, data + 128, 32, ws1); + X_8(1, data + 192, 32, ws1); + + X_8(1, data, N, ws + (p->ws_is[3] << 1)); + } else if (N == 64) { + X_4(1, data + 0, 16, ws); + X_4(1, data + 64, 16, ws); + X_4(1, data + 96, 16, ws); + + X_8(1, data + 0, N, ws + (p->ws_is[2] << 1)); + } else if (N == 32) { + X_8(1, data, N, ws + (p->ws_is[1] << 1)); + } else { + assert(N == 16); + X_4(1, data, N, ws); + } +} + +void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out) +{ + if (ffts_ctzl(p->N) & 1) { + ffts_static_firstpass_odd(out, in, p, 0); + } else { + ffts_static_firstpass_even(out, in, p, 0); + } + + ffts_static_rec_f(p, out, p->N); +} + +void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out) +{ + if (ffts_ctzl(p->N) & 1) { + ffts_static_firstpass_odd(out, in, p, 1); + } else { + ffts_static_firstpass_even(out, in, p, 1); + } + + ffts_static_rec_i(p, out, p->N); +} \ No newline at end of file diff --git a/src/ffts_static.h b/src/ffts_static.h index d854053..e599d80 100644 --- a/src/ffts_static.h +++ b/src/ffts_static.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - + Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - + Copyright (c) 2012, The University of Waikato + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,11 +31,14 @@ */ -#ifndef __FFTS_STATIC_H__ -#define __FFTS_STATIC_H__ +#ifndef FFTS_STATIC_H +#define FFTS_STATIC_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif #include "ffts.h" -#include "neon.h" void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) ; void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out); @@ -43,5 +46,4 @@ void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out); void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) ; void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out); -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_STATIC_H */ -- cgit v1.1 From 9bbba5621ab6b84d878a4a7710ee59a63b31ea59 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 21 Nov 2014 16:33:40 +0200 Subject: Define SSE constants when HAVE_SSE is defined --- src/codegen.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen.c b/src/codegen.c index 1556d63..5cd7616 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -132,7 +132,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N pps = ps; -#ifdef __x86_64__ +#ifdef HAVE_SSE if (sign < 0) { p->constants = sse_constants; } else { -- cgit v1.1 From ab587e6f33d415334ed706f6b1e021564aee8feb Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 21 Nov 2014 16:43:51 +0200 Subject: Improve CMake build system to automatically detect SSE, and support 32 bit Windows build --- CMakeLists.txt | 163 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 87 insertions(+), 76 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 85d3108..2cfe43d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,10 +11,6 @@ if(NOT CMAKE_BUILD_TYPE) endif(NOT CMAKE_BUILD_TYPE) # common options -option(ENABLE_SSE - "Enables the use of SSE instructions." ON -) - option(ENABLE_NEON "Enables the use of NEON instructions." OFF ) @@ -27,26 +23,36 @@ option(DISABLE_DYNAMIC_CODE "Disables the use of dynamic machine code generation." OFF ) +option(ENABLE_RUNTIME_DYNAMIC_CODE + "Enables the runtime generation of dynamic machine code." ON +) + option(ENABLE_SHARED "Enable building a shared library." OFF ) -option(ENABLE_YASM_COMPILE - "Enables compiling with YASM for Windows." OFF -) +include(CheckIncludeFile) add_definitions(-DFFTS_CMAKE_GENERATED) +# check if the platform has support for SSE SIMD extension +check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) +if(HAVE_XMMINTRIN_H) + add_definitions(-DHAVE_SSE) +else() + # check if the platform has support NEON SIMD extension + check_include_file(arm_neon.h HAVE_ARM_NEON_H) + if(HAVE_ARM_NEON_H) + endif(HAVE_ARM_NEON_H) +endif(HAVE_XMMINTRIN_H) + +# compiler settings if(MSVC) # enable all warnings but also disable some.. 
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4 /wd4127") add_definitions(-D_USE_MATH_DEFINES) -endif(MSVC) - -# GCC specific options -if(CMAKE_COMPILER_IS_GNUCC) - include(CheckIncludeFile) +elseif(CMAKE_COMPILER_IS_GNUCC) include(CheckLibraryExists) # enable all warnings @@ -58,7 +64,11 @@ if(CMAKE_COMPILER_IS_GNUCC) list(APPEND CMAKE_REQUIRED_LIBRARIES m) list(APPEND FFTS_EXTRA_LIBRARIES m) endif(HAVE_LIBM) -endif(CMAKE_COMPILER_IS_GNUCC) + + if(HAVE_XMMINTRIN_H) + add_definitions(-msse) + endif(HAVE_XMMINTRIN_H) +endif(MSVC) include_directories(include) include_directories(src) @@ -75,90 +85,97 @@ set(FFTS_SOURCES src/ffts_nd.c src/ffts_nd.h src/ffts_real.h - src/ffts_real.c + src/ffts_real.c src/ffts_real_nd.c src/ffts_real_nd.h src/ffts_small.c src/ffts_small.h - src/ffts_static.c - src/ffts_static.h src/macros.h src/patterns.c src/patterns.h src/types.h ) -if(ENABLE_SSE) - add_definitions(-DHAVE_SSE) - add_definitions(-D__x86_64__) - - list(APPEND FFTS_SOURCES - src/macros-sse.h - ) - - if(MSVC) - if(ENABLE_YASM_COMPILE) - set(CMAKE_ASM-ATT_COMPILER yasm) - enable_language(ASM-ATT) - - add_custom_command( - OUTPUT sse_win64.obj - COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 - -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas - ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Generating sse_win64.obj" - ) - - list(APPEND FFTS_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj - src/sse_win64.s - ) - else() - add_definitions(-DSSE_DEFINE_CONSTANTS) - endif(ENABLE_YASM_COMPILE) - else() - list(APPEND FFTS_SOURCES - src/sse.s - ) - else() - add_definitions(-msse2) - endif(MSVC) -endif() - if(ENABLE_NEON) if(DISABLE_DYNAMIC_CODE) list(APPEND FFTS_SOURCES - source/neon_static_f.s - source/neon_static_i.s + src/neon_static_f.s + src/neon_static_i.s ) else() list(APPEND FFTS_SOURCES - source/neon.s - source/arch/neon.c + src/neon.s + src/arch/neon.c ) - endif() + endif(DISABLE_DYNAMIC_CODE) add_definitions(-DHAVE_NEON) -endif() - -if(ENABLE_VFP) +elseif(ENABLE_VFP) list(APPEND FFTS_SOURCES - source/vfp.s - source/arch/vfp.c + src/vfp.s + src/arch/vfp.c ) add_definitions(-DHAVE_VFP) -endif() +elseif(HAVE_XMMINTRIN_H) + add_definitions(-DHAVE_SSE) -if(ENABLE_SINGLE) - add_definitions(-DHAVE_SINGLE) -endif() + list(APPEND FFTS_SOURCES + src/macros-sse.h + ) + + if(NOT DISABLE_DYNAMIC_CODE) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + list(APPEND FFTS_SOURCES + src/codegen_sse.h + ) + + if(MSVC) + if(NOT ENABLE_RUNTIME_DYNAMIC_CODE) + # YASM supports x86 GAS syntax + set(CMAKE_ASM-ATT_COMPILER yasm) + enable_language(ASM-ATT) + + if(CMAKE_ASM-ATT_COMPILER_WORKS) + add_custom_command( + OUTPUT sse_win64.obj + COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 + -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas + ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Generating sse_win64.obj" + ) + + list(APPEND FFTS_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj + src/sse_win64.s + ) + else() + message(WARNING "YASM is required, enabling runtime dynamic code.") + set(ENABLE_RUNTIME_DYNAMIC_CODE ON) + endif(CMAKE_ASM-ATT_COMPILER_WORKS) + endif(NOT ENABLE_RUNTIME_DYNAMIC_CODE) + + if(ENABLE_RUNTIME_DYNAMIC_CODE) + add_definitions(-DSSE_DEFINE_CONSTANTS) + endif(ENABLE_RUNTIME_DYNAMIC_CODE) + else() + list(APPEND FFTS_SOURCES + src/sse.s + ) + endif(MSVC) + else() + message(WARNING "Dynamic 
code is only supported with x64, disabling dynamic code.") + set(DISABLE_DYNAMIC_CODE ON) + endif(CMAKE_SIZEOF_VOID_P EQUAL 8) + endif(NOT DISABLE_DYNAMIC_CODE) +endif(ENABLE_NEON) if(DISABLE_DYNAMIC_CODE) list(APPEND FFTS_SOURCES src/ffts_static.c + src/ffts_static.h ) add_definitions(-DDYNAMIC_DISABLED) @@ -167,13 +184,7 @@ else() src/codegen.c src/codegen.h ) - - if(ENABLE_SSE) - list(APPEND FFTS_SOURCES - src/codegen_sse.h - ) - endif(ENABLE_SSE) -endif() +endif(DISABLE_DYNAMIC_CODE) add_library(ffts_static ${FFTS_HEADERS} -- cgit v1.1 From 06d9dc191a2d5ffcebeecaebd1d082c536fb9a8a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 24 Nov 2014 12:39:55 +0200 Subject: Update README, build for 32 bit and 64 bit Windows are now supported --- CMakeLists.txt | 16 ++++++++-------- README | 11 ++++------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2cfe43d..16ca9af 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,17 +54,17 @@ if(MSVC) add_definitions(-D_USE_MATH_DEFINES) elseif(CMAKE_COMPILER_IS_GNUCC) include(CheckLibraryExists) - + # enable all warnings set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra") - + # some systems need libm for some of the math functions to work check_library_exists(m pow "" HAVE_LIBM) if(HAVE_LIBM) list(APPEND CMAKE_REQUIRED_LIBRARIES m) list(APPEND FFTS_EXTRA_LIBRARIES m) endif(HAVE_LIBM) - + if(HAVE_XMMINTRIN_H) add_definitions(-msse) endif(HAVE_XMMINTRIN_H) @@ -129,13 +129,13 @@ elseif(HAVE_XMMINTRIN_H) list(APPEND FFTS_SOURCES src/codegen_sse.h ) - + if(MSVC) if(NOT ENABLE_RUNTIME_DYNAMIC_CODE) # YASM supports x86 GAS syntax - set(CMAKE_ASM-ATT_COMPILER yasm) + set(CMAKE_ASM-ATT_COMPILER yasm) enable_language(ASM-ATT) - + if(CMAKE_ASM-ATT_COMPILER_WORKS) add_custom_command( OUTPUT sse_win64.obj @@ -146,7 +146,7 @@ elseif(HAVE_XMMINTRIN_H) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMENT "Generating sse_win64.obj" ) - + list(APPEND FFTS_SOURCES ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj src/sse_win64.s @@ -169,7 +169,7 @@ elseif(HAVE_XMMINTRIN_H) message(WARNING "Dynamic code is only supported with x64, disabling dynamic code.") set(DISABLE_DYNAMIC_CODE ON) endif(CMAKE_SIZEOF_VOID_P EQUAL 8) - endif(NOT DISABLE_DYNAMIC_CODE) + endif(NOT DISABLE_DYNAMIC_CODE) endif(ENABLE_NEON) if(DISABLE_DYNAMIC_CODE) diff --git a/README b/README index b54ab69..f7a67a0 100644 --- a/README +++ b/README @@ -10,19 +10,16 @@ To build for Linux or OS X on x86, run make make install -Optionally build for Linux with CMake, run +Optionally build for Windows and Linux with CMake, run mkdir build + cd build cmake .. - -To build for Windows x64 with MSVC 2005, run - mkdir build - cmake .. -G "Visual Studio 8 2005 Win64" -Note that 32 bit Windows is not supported at the moment. - FFTS dynamically generates code at runtime. This can be disabled with --disable-dynamic-code +Note that 32 bit x86 dynamic machine code generation is not supported at the moment. + For JNI targets: --enable-jni will build the jni stuff automatically for the host target, and --enable-shared must also be added manually for it to work. 
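The runtime code generation the README refers to is the standard JIT pattern: emit machine code into a writable buffer, remap the buffer executable, then call it through a function pointer. A minimal POSIX/x86-64 sketch of that pattern follows; it is an illustration only, not FFTS's actual generator, and the emitted bytes simply encode "mov eax, 42; ret".

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

typedef int (*jit_func_t)(void);

int main(void)
{
    /* x86-64 machine code for: mov eax, 42; ret */
    static const unsigned char code[] = { 0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3 };
    void *buf;

    /* 1. get a writable anonymous page */
    buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* 2. emit the instructions */
    memcpy(buf, code, sizeof(code));

    /* 3. flip the page to executable (W^X), then call through a
     *    function pointer */
    if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
        return 1;

    printf("%d\n", ((jit_func_t) buf)());

    munmap(buf, 4096);
    return 0;
}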
-- cgit v1.1 From f16baa9919e28a57363e974e4adfff6c7dce9e74 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 6 Dec 2014 16:33:46 +0200 Subject: Definitions HAVE_NEON and HAVE_SSE cannot coexist --- src/ffts_real.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index c2f03b7..12c02b9 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -36,9 +36,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_NEON #include -#endif - -#ifdef HAVE_SSE +#elif HAVE_SSE #include #endif @@ -132,10 +130,6 @@ static void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) for (i = 0; i < N/2; i++) { out[2*i + 0] = buf[2*i + 0] * A[2*i] - buf[2*i + 1] * A[2*i + 1] + buf[N - 2*i] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; out[2*i + 1] = buf[2*i + 1] * A[2*i] + buf[2*i + 0] * A[2*i + 1] + buf[N - 2*i] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; - - /* out[2*N-2*i+0] = out[2*i+0]; - out[2*N-2*i+1] = -out[2*i+1]; - */ } #endif -- cgit v1.1 From 91ac7ca9420aa77e79bd533b15536c8f98b162c5 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 6 Dec 2014 16:36:40 +0200 Subject: Definitions HAVE_VFP and HAVE_SSE cannot coexist --- src/macros.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/macros.h b/src/macros.h index 12c52c6..0010534 100644 --- a/src/macros.h +++ b/src/macros.h @@ -52,9 +52,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_VFP #include "macros-alpha.h" -#endif - -#ifdef HAVE_SSE +#elif HAVE_SSE #include "macros-sse.h" #endif -- cgit v1.1 From d5496f9a78bb45192b176bd32d4b81c72fb576dd Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 7 Dec 2014 12:05:26 +0200 Subject: Fix issue #29 "Make FFTS work on all architectures" Modify macros-alpha.h to provide scalar operations on all platforms. Using union and memcpy to avoid strict aliasing issues. --- src/macros-alpha.h | 214 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 121 insertions(+), 93 deletions(-) diff --git a/src/macros-alpha.h b/src/macros-alpha.h index be5ec20..f4efaf8 100644 --- a/src/macros-alpha.h +++ b/src/macros-alpha.h @@ -1,10 +1,10 @@ /* - + This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2013, Michael J. Cree + + Copyright (c) 2013, Michael J. Cree Copyright (c) 2012, 2013, Anthony M. Blake - + All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,177 +31,205 @@ */ -#ifndef __MACROS_ALPHA_H__ -#define __MACROS_ALPHA_H__ +#ifndef FFTS_MACROS_ALPHA_H +#define FFTS_MACROS_ALPHA_H -#include +#include -#ifdef __alpha__ -#define restrict -#endif - -typedef struct {float r1, i1, r2, i2;} V; +typedef union { + struct { + float r1; + float i1; + float r2; + float i2; + } r; + uint32_t u[4]; +} V; #define FFTS_MALLOC(d,a) malloc(d) #define FFTS_FREE(d) free(d) -#define VLIT4(f3,f2,f1,f0) ((V){f0,f1,f2,f3}) - -static inline V VADD(V x, V y) +static FFTS_ALWAYS_INLINE V VLIT4(float f3, float f2, float f1, float f0) { V z; - z.r1 = x.r1 + y.r1; - z.i1 = x.i1 + y.i1; - z.r2 = x.r2 + y.r2; - z.i2 = x.i2 + y.i2; + + z.r.r1 = f0; + z.r.i1 = f1; + z.r.r2 = f2; + z.r.i2 = f3; + return z; } - -static inline V VSUB(V x, V y) +static FFTS_ALWAYS_INLINE V VADD(V x, V y) { V z; - z.r1 = x.r1 - y.r1; - z.i1 = x.i1 - y.i1; - z.r2 = x.r2 - y.r2; - z.i2 = x.i2 - y.i2; + + z.r.r1 = x.r.r1 + y.r.r1; + z.r.i1 = x.r.i1 + y.r.i1; + z.r.r2 = x.r.r2 + y.r.r2; + z.r.i2 = x.r.i2 + y.r.i2; + return z; } - -static inline V VMUL(V x, V y) +static FFTS_ALWAYS_INLINE V VSUB(V x, V y) { V z; - z.r1 = x.r1 * y.r1; - z.i1 = x.i1 * y.i1; - z.r2 = x.r2 * y.r2; - z.i2 = x.i2 * y.i2; + + z.r.r1 = x.r.r1 - y.r.r1; + z.r.i1 = x.r.i1 - y.r.i1; + z.r.r2 = x.r.r2 - y.r.r2; + z.r.i2 = x.r.i2 - y.r.i2; + return z; } -static inline V VXOR(V x, V y) +static FFTS_ALWAYS_INLINE V VMUL(V x, V y) { - V r; - r.r1 = (uint32_t)x.r1 ^ (uint32_t)y.r1; - r.i1 = (uint32_t)x.i1 ^ (uint32_t)y.i1; - r.r2 = (uint32_t)x.r2 ^ (uint32_t)y.r2; - r.i2 = (uint32_t)x.i2 ^ (uint32_t)y.i2; - return r; + V z; + + z.r.r1 = x.r.r1 * y.r.r1; + z.r.i1 = x.r.i1 * y.r.i1; + z.r.r2 = x.r.r2 * y.r.r2; + z.r.i2 = x.r.i2 * y.r.i2; + + return z; } -static inline V VSWAPPAIRS(V x) +static FFTS_ALWAYS_INLINE V VXOR(V x, V y) { V z; - z.r1 = x.i1; - z.i1 = x.r1; - z.r2 = x.i2; - z.i2 = x.r2; + + z.u[0] = x.u[0] ^ y.u[0]; + z.u[1] = x.u[1] ^ y.u[1]; + z.u[2] = x.u[2] ^ y.u[2]; + z.u[3] = x.u[3] ^ y.u[3]; + return z; } +static FFTS_ALWAYS_INLINE V VSWAPPAIRS(V x) +{ + V z; + + z.r.r1 = x.r.i1; + z.r.i1 = x.r.r1; + z.r.r2 = x.r.i2; + z.r.i2 = x.r.r2; + + return z; +} -static inline V VBLEND(V x, V y) +static FFTS_ALWAYS_INLINE V VBLEND(V x, V y) { V z; - z.r1 = x.r1; - z.i1 = x.i1; - z.r2 = y.r2; - z.i2 = y.i2; + + z.r.r1 = x.r.r1; + z.r.i1 = x.r.i1; + z.r.r2 = y.r.r2; + z.r.i2 = y.r.i2; + return z; } -static inline V VUNPACKHI(V x, V y) +static FFTS_ALWAYS_INLINE V VUNPACKHI(V x, V y) { V z; - z.r1 = x.r2; - z.i1 = x.i2; - z.r2 = y.r2; - z.i2 = y.i2; + + z.r.r1 = x.r.r2; + z.r.i1 = x.r.i2; + z.r.r2 = y.r.r2; + z.r.i2 = y.r.i2; + return z; } -static inline V VUNPACKLO(V x, V y) +static FFTS_ALWAYS_INLINE V VUNPACKLO(V x, V y) { V z; - z.r1 = x.r1; - z.i1 = x.i1; - z.r2 = y.r1; - z.i2 = y.i1; + + z.r.r1 = x.r.r1; + z.r.i1 = x.r.i1; + z.r.r2 = y.r.r1; + z.r.i2 = y.r.i1; + return z; } -static inline V VDUPRE(V x) +static FFTS_ALWAYS_INLINE V VDUPRE(V x) { V z; - z.r1 = x.r1; - z.i1 = x.r1; - z.r2 = x.r2; - z.i2 = x.r2; + + z.r.r1 = x.r.r1; + z.r.i1 = x.r.r1; + z.r.r2 = x.r.r2; + z.r.i2 = x.r.r2; + return z; } -static inline V VDUPIM(V x) +static FFTS_ALWAYS_INLINE V VDUPIM(V x) { V z; - z.r1 = x.i1; - z.i1 = x.i1; - z.r2 = x.i2; - z.i2 = x.i2; + + z.r.r1 = x.r.i1; + z.r.i1 = x.r.i1; + z.r.r2 = x.r.i2; + z.r.i2 = x.r.i2; + return z; } -static inline V IMUL(V d, V re, V im) +static FFTS_ALWAYS_INLINE V IMUL(V d, V re, V im) { re = VMUL(re, d); im = VMUL(im, 
VSWAPPAIRS(d)); - return VSUB(re, im); + return VSUB(re, im); } - -static inline V IMULJ(V d, V re, V im) +static FFTS_ALWAYS_INLINE V IMULJ(V d, V re, V im) { re = VMUL(re, d); im = VMUL(im, VSWAPPAIRS(d)); return VADD(re, im); } -static inline V MULI(int inv, V x) +static FFTS_ALWAYS_INLINE V MULI(int inv, V x) { V z; if (inv) { - z.r1 = -x.r1; - z.i1 = x.i1; - z.r2 = -x.r2; - z.i2 = x.i2; - }else{ - z.r1 = x.r1; - z.i1 = -x.i1; - z.r2 = x.r2; - z.i2 = -x.i2; + z.r.r1 = -x.r.r1; + z.r.i1 = x.r.i1; + z.r.r2 = -x.r.r2; + z.r.i2 = x.r.i2; + } else { + z.r.r1 = x.r.r1; + z.r.i1 = -x.r.i1; + z.r.r2 = x.r.r2; + z.r.i2 = -x.r.i2; } + return z; } - -static inline V IMULI(int inv, V x) +static FFTS_ALWAYS_INLINE V IMULI(int inv, V x) { return VSWAPPAIRS(MULI(inv, x)); } - -static inline V VLD(const void *s) +static FFTS_ALWAYS_INLINE V VLD(const void *s) { - V *d = (V *)s; - return *d; + V z; + memcpy(&z, s, sizeof(z)); + return z; } - -static inline void VST(void *d, V s) +static FFTS_ALWAYS_INLINE void VST(void *d, V s) { - V *r = (V *)d; + V *r = (V*) d; *r = s; } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_MACROS_ALPHA_H */ \ No newline at end of file -- cgit v1.1 From 32b519b6b766c5f310068aaed092cade8b31d078 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 7 Dec 2014 12:08:23 +0200 Subject: Fix warning "cast from pointer to integer of different size" --- src/ffts.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index ee0102b..c682b28 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -151,11 +151,11 @@ void ffts_execute(ffts_plan_t *p, const void *in, void *out) { /* TODO: Define NEEDS_ALIGNED properly instead */ #if defined(HAVE_SSE) || defined(HAVE_NEON) - if (((int) in % 16) != 0) { + if (((uintptr_t) in % 16) != 0) { LOG("ffts_execute: input buffer needs to be aligned to a 128bit boundary\n"); } - if (((int) out % 16) != 0) { + if (((uintptr_t) out % 16) != 0) { LOG("ffts_execute: output buffer needs to be aligned to a 128bit boundary\n"); } #endif -- cgit v1.1 From 29455865d8e6de9fcedefad6385041b2e59a82a3 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sun, 7 Dec 2014 12:10:28 +0200 Subject: To use build scalar only version, "cmake -DENABLE_VFP=ON -DDISABLE_DYNAMIC_CODE=ON" --- CMakeLists.txt | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 16ca9af..f9be0d1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -111,10 +111,12 @@ if(ENABLE_NEON) add_definitions(-DHAVE_NEON) elseif(ENABLE_VFP) - list(APPEND FFTS_SOURCES - src/vfp.s - src/arch/vfp.c - ) + if(NOT DISABLE_DYNAMIC_CODE) + list(APPEND FFTS_SOURCES + src/vfp.s + src/arch/vfp.c + ) + endif(NOT DISABLE_DYNAMIC_CODE) add_definitions(-DHAVE_VFP) elseif(HAVE_XMMINTRIN_H) -- cgit v1.1 From 4fa682c1522ae0f3ded4ccc0efcb817c16c5f4e7 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 9 Dec 2014 10:41:12 +0200 Subject: Fix warning "comparison between signed and unsigned integer expressions" --- src/ffts.c | 2 +- src/ffts_nd.c | 7 ++++--- src/ffts_real_nd.c | 14 ++++++++------ src/patterns.c | 4 ++-- src/patterns.h | 4 ++-- 5 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index c682b28..3def70d 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -211,7 +211,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) float *tmp; cdata_t *w; size_t i; - int n; + size_t n; #ifdef __arm__ /* #ifdef HAVE_NEON */ diff --git 
a/src/ffts_nd.c b/src/ffts_nd.c index 3651d1f..8a642de 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -262,11 +262,12 @@ static void ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) uint64_t *dout = (uint64_t*) out; ffts_plan_t *plan; - size_t i, j; + int i; + size_t j; plan = p->plans[0]; - for (i = 0; i < p->Ns[0]; i++) { - plan->transform(plan, din + (i * p->Ms[0]), buf + (i * p->Ms[0])); + for (j = 0; j < p->Ns[0]; j++) { + plan->transform(plan, din + (j * p->Ms[0]), buf + (j * p->Ms[0])); } ffts_transpose(buf, dout, p->Ms[0], p->Ns[0], p->transpose_buf); diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 8b66333..105d388 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -155,11 +155,12 @@ static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) uint64_t *transpose_buf = (uint64_t*) p->transpose_buf; ffts_plan_t *plan; - size_t i, j; + int i; + size_t j; plan = p->plans[0]; - for (i = 0; i < Ns0; i++) { - plan->transform(plan, din + (i * Ms0), buf + (i * (Ms0 / 2 + 1))); + for (j = 0; j < Ns0; j++) { + plan->transform(plan, din + (j * Ms0), buf + (j * (Ms0 / 2 + 1))); } ffts_scalar_transpose(buf, dout, Ms0 / 2 + 1, Ns0, transpose_buf); @@ -194,7 +195,8 @@ static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) ffts_plan_t *plan; size_t vol; - size_t i, j; + int i; + size_t j; vol = p->Ns[0]; for (i = 1; i < p->rank; i++) { @@ -206,8 +208,8 @@ static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) ffts_scalar_transpose(din, buf, Ms0, Ns0, transpose_buf); plan = p->plans[0]; - for (i = 0; i < Ms0; i++) { - plan->transform(plan, buf + (i * Ns0), buf2 + (i * Ns0)); + for (j = 0; j < Ms0; j++) { + plan->transform(plan, buf + (j * Ns0), buf2 + (j * Ns0)); } ffts_scalar_transpose(buf2, buf, Ns0, Ms0, transpose_buf); diff --git a/src/patterns.c b/src/patterns.c index f748c48..91f4250 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -107,7 +107,7 @@ static void ffts_hardcodedleaf_is_rec(ptrdiff_t **is, int big_N, int N, int poff } } -ptrdiff_t *ffts_init_is(int N, int leaf_N, int VL) +ptrdiff_t *ffts_init_is(size_t N, size_t leaf_N, int VL) { int i, i0, i1, i2; int stride = (int) (log(N/leaf_N) / log(2)); @@ -163,7 +163,7 @@ static int ffts_compare_offsets(const void *a, const void *b) return (int) diff; } -ptrdiff_t *ffts_init_offsets(int N, int leaf_N) +ptrdiff_t *ffts_init_offsets(size_t N, size_t leaf_N) { ptrdiff_t *offsets, *tmp; size_t i; diff --git a/src/patterns.h b/src/patterns.h index 680c6e0..d172651 100644 --- a/src/patterns.h +++ b/src/patterns.h @@ -40,7 +40,7 @@ #include -ptrdiff_t *ffts_init_is(int N, int leaf_N, int VL); -ptrdiff_t *ffts_init_offsets(int N, int leaf_N); +ptrdiff_t *ffts_init_is(size_t N, size_t leaf_N, int VL); +ptrdiff_t *ffts_init_offsets(size_t N, size_t leaf_N); #endif /* FFTS_PATTERNS_H */ -- cgit v1.1 From 74df6cf04b6d2fb924af6530b568275800c16258 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 9 Mar 2015 17:05:23 +0200 Subject: To support generic building, fallback using scalar macros from macros-alpha.h --- src/macros.h | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/src/macros.h b/src/macros.h index 0010534..b4a6a5a 100644 --- a/src/macros.h +++ b/src/macros.h @@ -40,20 +40,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef HAVE_NEON #include "macros-neon.h" -#else -#ifdef __alpha__ -#include "macros-alpha.h" -#else -#ifdef __powerpc__ -#include "macros-altivec.h" -#endif -#endif -#endif - -#ifdef HAVE_VFP -#include "macros-alpha.h" #elif HAVE_SSE #include "macros-sse.h" +#elif __powerpc__ +#include "macros-altivec.h" +#else +#include "macros-alpha.h" #endif static FFTS_INLINE void TX2(V *a, V *b) -- cgit v1.1 From 68f0ffcb485da7317ccf9fe1c16b60bba1a53499 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 9 Mar 2015 17:09:02 +0200 Subject: Fix FFT size 2, FFT size 2 and 4 don't need/use lookup tables. --- src/ffts.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ffts.c b/src/ffts.c index 3def70d..5ff5eb2 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -500,7 +500,8 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) p->destroy = ffts_free_1d; p->N = N; - if (ffts_generate_luts(p, N, leaf_N, sign)) { + /* generate lookup tables */ + if (N > 4 && ffts_generate_luts(p, N, leaf_N, sign)) { goto cleanup; } -- cgit v1.1 From 40532246c112846278c526967a377535bbea3c4e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 11:13:17 +0200 Subject: Fix ENABLE_VFP options in CMake --- CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f9be0d1..2ef0a9d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -114,10 +114,13 @@ elseif(ENABLE_VFP) if(NOT DISABLE_DYNAMIC_CODE) list(APPEND FFTS_SOURCES src/vfp.s - src/arch/vfp.c ) endif(NOT DISABLE_DYNAMIC_CODE) + list(APPEND FFTS_SOURCES + src/vfp.s + ) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") add_definitions(-DHAVE_VFP) elseif(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) -- cgit v1.1 From 03b045eba77c2a140efe1bc9d71d81a9c64c1c1e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 11:14:33 +0200 Subject: Wrong inclusion order --- src/codegen.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen.c b/src/codegen.c index 5cd7616..987d55f 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -44,8 +44,8 @@ typedef uint8_t insns_t; #include "codegen_arm.h" #include "neon.h" #elif HAVE_VFP -#include "codegen_arm.h" #include "vfp.h" +#include "codegen_arm.h" #else #include "codegen_sse.h" #endif -- cgit v1.1 From 3260224d6fd7aaf885792e1115438c07ee15f69f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 11:21:46 +0200 Subject: Undefined variable in generate_prologue --- src/codegen_arm.h | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/codegen_arm.h b/src/codegen_arm.h index 3493a11..b739055 100644 --- a/src/codegen_arm.h +++ b/src/codegen_arm.h @@ -31,10 +31,8 @@ */ -#ifndef __CODEGEN_ARM_H__ -#define __CODEGEN_ARM_H__ - - +#ifndef FFTS_CODEGEN_ARM_H +#define FFTS_CODEGEN_ARM_H uint32_t BL(void *pos, void *target) { return 0xeb000000 | (((target - pos) / 4) & 0xffffff); @@ -195,14 +193,14 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) *(*fp)++ = PUSH_LR(); *(*fp)++ = 0xed2d8b10; - ADDI(fp, 3, 1, 0); - ADDI(fp, 7, 1, N); - ADDI(fp, 5, 1, 2*N); - ADDI(fp, 10, 7, 2*N); - ADDI(fp, 4, 5, 2*N); - ADDI(fp, 8, 10, 2*N); - ADDI(fp, 6, 4, 2*N); - ADDI(fp, 9, 8, 2*N); + ADDI(fp, 3, 1, 0); + ADDI(fp, 7, 1, p->N); + ADDI(fp, 5, 1, 2 * p->N); + ADDI(fp, 10, 7, 2 * p->N); + ADDI(fp, 4, 5, 2 * p->N); + ADDI(fp, 8, 10, 2 * p->N); + ADDI(fp, 6, 4, 2 * p->N); + ADDI(fp, 9, 8, 2 * p->N); // load offsets into r12 *(*fp)++ = LDRI(12, 0, ((uint32_t) 
&p->offsets) - ((uint32_t) p)); @@ -221,5 +219,4 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) return start; } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_CODEGEN_ARM_H */ \ No newline at end of file -- cgit v1.1 From ca6ca7311fe13efc3b7d0ffeb21d32e29cde3f1b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 11:22:46 +0200 Subject: Variable 'tmp' set but not used in function 'ffts_generate_luts' --- src/ffts.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 5ff5eb2..528075f 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -208,7 +208,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) int hardcoded; size_t lut_size; size_t n_luts; - float *tmp; cdata_t *w; size_t i; size_t n; @@ -460,8 +459,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) n *= 2; } - tmp = (float *)p->ws; - #ifdef __arm__ if (sign < 0) { p->oe_ws = (void*)(&w_data[4]); -- cgit v1.1 From df3e73b193429e9b3db977a3aa67a77a0155e39c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 11:31:53 +0200 Subject: Remove FFTS plan variable 'transforms', which is not used --- src/ffts.c | 12 ------------ src/ffts_internal.h | 5 ----- 2 files changed, 17 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 528075f..4d732ce 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -187,10 +187,6 @@ void ffts_free_1d(ffts_plan_t *p) FFTS_FREE(p->ws); } - if (p->transforms) { - free(p->transforms); - } - if (p->is) { free(p->is); } @@ -574,14 +570,6 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) } #endif } else { - p->transforms = malloc(2 * sizeof(*p->transforms)); - if (!p->transforms) { - goto cleanup; - } - - p->transforms[0] = 0; - p->transforms[1] = 1; - switch (N) { case 2: p->transform = &ffts_firstpass_2; diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 413bb51..a77fb0d 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -127,11 +127,6 @@ struct _ffts_plan_t { void *lastlut; /** - * Used in multidimensional Code ?? 
- */ - size_t *transforms; - - /** * Pointer to the dynamically generated function * that will execute the FFT */ -- cgit v1.1 From ebae52c72d3488d123c6ca9e31dfd95872d0575c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:19:38 +0200 Subject: Check existence of various headers and add guards for them --- CMakeLists.txt | 28 ++++++++++++++++++++++++++++ src/codegen.c | 8 +++++++- src/ffts.c | 3 ++- src/ffts_internal.h | 7 +++++++ src/ffts_small.c | 2 -- src/patterns.c | 3 +++ 6 files changed, 47 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ef0a9d..383c01f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,33 @@ include(CheckIncludeFile) add_definitions(-DFFTS_CMAKE_GENERATED) +# check existence of various headers +check_include_file(stdint.h HAVE_STDINT_H) +check_include_file(stdlib.h HAVE_STDLIB_H) +check_include_file(string.h HAVE_STRING_H) +check_include_file(sys/mman.h HAVE_SYS_MMAN_H) +check_include_file(unistd.h HAVE_UNISTD_H) + +if(HAVE_STDINT_H) + add_definitions(-DHAVE_STDINT_H) +endif(HAVE_STDINT_H) + +if(HAVE_STDLIB_H) + add_definitions(-DHAVE_STDLIB_H) +endif(HAVE_STDLIB_H) + +if(HAVE_STRING_H) + add_definitions(-DHAVE_STRING_H) +endif(HAVE_STRING_H) + +if(HAVE_SYS_MMAN_H) + add_definitions(-DHAVE_SYS_MMAN_H) +endif(HAVE_SYS_MMAN_H) + +if(HAVE_UNISTD_H) + add_definitions(-DHAVE_UNISTD_H) +endif(HAVE_UNISTD_H) + # check if the platform has support for SSE SIMD extension check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) if(HAVE_XMMINTRIN_H) @@ -43,6 +70,7 @@ else() # check if the platform has support NEON SIMD extension check_include_file(arm_neon.h HAVE_ARM_NEON_H) if(HAVE_ARM_NEON_H) + add_definitions(-DHAVE_NEON) endif(HAVE_ARM_NEON_H) endif(HAVE_XMMINTRIN_H) diff --git a/src/codegen.c b/src/codegen.c index 987d55f..7635875 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -54,10 +54,16 @@ typedef uint8_t insns_t; #include #include /* #include */ + +#ifdef HAVE_STDLIB_H #include +#endif + +#ifdef HAVE_STRING_H #include +#endif -#ifdef __ANDROID__ +#ifdef HAVE_UNISTD_H #include #endif diff --git a/src/ffts.c b/src/ffts.c index 4d732ce..22172ab 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -32,6 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "ffts.h" + #include "ffts_internal.h" #include "macros.h" #include "patterns.h" @@ -49,7 +50,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if _WIN32 #include -#else +#elif HAVE_SYS_MMAN_H #include #endif diff --git a/src/ffts_internal.h b/src/ffts_internal.h index a77fb0d..2007242 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -42,8 +42,15 @@ #include #include #include + +#ifdef HAVE_STDINT_H #include +#endif + +#ifdef HAVE_STDLIB_H #include +#endif + #include #define FFTS_PREFIX ffts diff --git a/src/ffts_small.c b/src/ffts_small.c index 8fa373f..ccc3ab0 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -35,8 +35,6 @@ #include "ffts_internal.h" #include "macros.h" -#include - void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out) { const data_t *din = (const data_t*) in; diff --git a/src/patterns.c b/src/patterns.c index 91f4250..17133d1 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -37,7 +37,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include #include + +#ifdef HAVE_STDLIB_H #include +#endif static void ffts_permute_addr(int N, int offset, int stride, int *d) { -- cgit v1.1 From e85f7d980613130190e41fa9d8c0cb30cff1477a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:28:48 +0200 Subject: Improve header logic --- src/ffts.c | 8 +++++--- src/ffts_nd.c | 4 +--- src/ffts_real_nd.c | 4 +--- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 22172ab..b9d4700 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -44,15 +44,17 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "codegen.h" #endif +#if _WIN32 +#include +#else #if __APPLE__ #include #endif -#if _WIN32 -#include -#elif HAVE_SYS_MMAN_H +#if HAVE_SYS_MMAN_H #include #endif +#endif #ifdef __arm__ static const FFTS_ALIGN(64) float w_data[16] = { diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 8a642de..839e35b 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -37,9 +37,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_NEON #include "neon.h" #include -#endif - -#ifdef HAVE_SSE +#elif HAVE_SSE #include #endif diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 105d388..5eae44b 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -41,9 +41,7 @@ #ifdef HAVE_NEON #include -#endif - -#ifdef HAVE_SSE +#elif HAVE_SSE #include #endif -- cgit v1.1 From 0ba2fd02b3eca061c9aca4055105647699707cfc Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:30:30 +0200 Subject: Remove duplicate code --- src/ffts.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index b9d4700..34aa0c9 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -211,24 +211,11 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) size_t i; size_t n; -#ifdef __arm__ - /* #ifdef HAVE_NEON */ - V MULI_SIGN; - - if(sign < 0) { - MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); - } else { - MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); - } - - /* #endif */ -#else if (sign < 0) { MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); } else { MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); } -#endif /* LUTS */ n_luts = ffts_ctzl(N / leaf_N); -- cgit v1.1 From 2cbcfc4bab55a3d7bef84f8c33eba7364d67e456 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:42:52 +0200 Subject: Added some casting corrections --- src/ffts.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 34aa0c9..80ff69e 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -271,7 +271,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) goto cleanup; } - p->ws_is = malloc(n_luts * sizeof(*p->ws_is)); + p->ws_is = (size_t*) malloc(n_luts * sizeof(*p->ws_is)); if (!p->ws_is) { goto cleanup; } @@ -360,9 +360,9 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) FFTS_FREE(w0); } else { - cdata_t *w0 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); - cdata_t *w1 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); - cdata_t *w2 = FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + cdata_t *w0 = (cdata_t*) FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + cdata_t *w1 = (cdata_t*) FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); + cdata_t *w2 = (cdata_t*) FFTS_MALLOC(n/8 * sizeof(cdata_t), 32); float *fw0 = (float*) w0; float *fw1 = (float*) w1; -- cgit v1.1 From 6049dc26c2a839d7c8e8f9622dad70c1174a6418 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:44:26 +0200 Subject: Add explicit 
'.fpu' directive --- src/vfp.s | 99 +++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 49 insertions(+), 50 deletions(-) diff --git a/src/vfp.s b/src/vfp.s index 8ced89d..a60367d 100644 --- a/src/vfp.s +++ b/src/vfp.s @@ -30,7 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - + .fpu vfp @ assumes r0 = out @ r1 = in ? @@ -41,7 +41,7 @@ @ r2 = const pointer @ & lr = temps - .align 4 + .align 4 #ifdef __APPLE__ .globl _vfp_e _vfp_e: @@ -50,44 +50,44 @@ _vfp_e: vfp_e: #endif _vfp_e_loop: - vldr s15, [r2, #8] - vldr s2, [r3] @ x0 - vldr s0, [r3, #4] - vldr s4, [r4] @ x1 - vldr s11, [r2] - vldr s10, [r7] @ x4 - vldr s3, [r7, #4] - vldr s8, [r8] @ x5 - vldr s1, [r8, #4] - vldr s14, [r9] @ x6 - vldr s9, [r9, #4] - vldr s6, [r10] @ x7 - vldr s12, [r10, #4] + vldr s15, [r2, #8] + vldr s2, [r3] @ x0 + vldr s0, [r3, #4] + vldr s4, [r4] @ x1 + vldr s11, [r2] + vldr s10, [r7] @ x4 + vldr s3, [r7, #4] + vldr s8, [r8] @ x5 + vldr s1, [r8, #4] + vldr s14, [r9] @ x6 + vldr s9, [r9, #4] + vldr s6, [r10] @ x7 + vldr s12, [r10, #4] vsub.f32 s18, s3, s1 vsub.f32 s7, s10, s8 vsub.f32 s5, s14, s6 vadd.f32 s6, s14, s6 - vldr s24, [r5, #4] + vldr s24, [r5, #4] vsub.f32 s14, s9, s12 - vldr s22, [r6, #4] + vldr s22, [r6, #4] vadd.f32 s8, s10, s8 - vldr s28, [r6] @ x3 - vldr s17, [r5] @ x2 + vldr s28, [r6] @ x3 + vldr s17, [r5] @ x2 vadd.f32 s10, s9, s12 vmul.f32 s13, s18, s15 vmul.f32 s9, s7, s11 vmul.f32 s16, s5, s11 vmul.f32 s18, s18, s11 vmul.f32 s30, s14, s11 - vldr s11, [r4, #4] - add r3, r3, #8 - add r4, r4, #8 - add r5, r5, #8 - add r6, r6, #8 - add r7, r7, #8 - add r8, r8, #8 - add r9, r9, #8 - add r10, r10, #8 + vldr s11, [r4, #4] + add r3, r3, #8 + add r4, r4, #8 + add r5, r5, #8 + add r6, r6, #8 + add r7, r7, #8 + add r8, r8, #8 + add r9, r9, #8 + add r10, r10, #8 vmul.f32 s12, s5, s15 vmul.f32 s20, s14, s15 vadd.f32 s5, s2, s4 @@ -111,7 +111,7 @@ _vfp_e_loop: vsub.f32 s12, s30, s12 vadd.f32 s20, s3, s10 vsub.f32 s15, s3, s10 - vsub.f32 s3, s26, s1 + vsub.f32 s3, s26, s1 vadd.f32 s18, s9, s13 vadd.f32 s10, s14, s4 vadd.f32 s6, s2, s7 @ @@ -120,15 +120,15 @@ _vfp_e_loop: vsub.f32 s4, s14, s4 vsub.f32 s8, s22, s16 @ vadd.f32 s1, s28, s12 -ldr lr, [r12], #4 -add lr, r0, lr, lsl #2 -subs r11, r11, #1 - vstr s18, [lr] + ldr lr, [r12], #4 + add lr, r0, lr, lsl #2 + subs r11, r11, #1 + vstr s18, [lr] vsub.f32 s2, s28, s12 vadd.f32 s12, s22, s16 @ vsub.f32 s16, s3, s24 @ vsub.f32 s13, s9, s13 - vstr s26, [lr, #4] + vstr s26, [lr, #4] vadd.f32 s28, s5, s15 @ vsub.f32 s7, s5, s15 @ vadd.f32 s14, s6, s10 @@ -136,26 +136,26 @@ subs r11, r11, #1 vadd.f32 s9, s0, s2 @ vsub.f32 s2, s0, s2 @ vsub.f32 s11, s11, s20 - vstr s28, [lr, #16] + vstr s28, [lr, #16] vadd.f32 s3, s3, s24 @ - vstr s16, [lr, #20] + vstr s16, [lr, #20] vsub.f32 s6, s6, s10 - vstr s13, [lr, #32] + vstr s13, [lr, #32] vsub.f32 s13, s12, s4 @ vsub.f32 s8, s8, s1 vadd.f32 s0, s12, s4 @ - vstr s11, [lr, #36] - vstr s7, [lr, #48] - vstr s3, [lr, #52] - vstr s14, [lr, #8] - vstr s5, [lr, #12] - vstr s9, [lr, #24] - vstr s13, [lr, #28] - vstr s6, [lr, #40] - vstr s8, [lr, #44] - vstr s2, [lr, #56] - vstr s0, [lr, #60] - bne _vfp_e_loop + vstr s11, [lr, #36] + vstr s7, [lr, #48] + vstr s3, [lr, #52] + vstr s14, [lr, #8] + vstr s5, [lr, #12] + vstr s9, [lr, #24] + vstr s13, [lr, #28] + vstr s6, [lr, #40] + vstr s8, [lr, #44] + vstr s2, [lr, #56] + vstr s0, [lr, #60] + bne _vfp_e_loop @ assumes r0 = out @ r1 = in ? 
@@ -461,7 +461,6 @@ _vfp_x8_loop: bne _vfp_x8_loop bx lr - .align 4 #ifdef __APPLE__ .globl _vfp_end -- cgit v1.1 From 8cb4d196b65190b275861fcf4d9add78dc708184 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 12:47:13 +0200 Subject: Remove redefinition of 'fw' in function 'ffts_generate_luts' --- src/ffts.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 80ff69e..1098bda 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -382,8 +382,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) } #ifdef __arm__ - //w = FFTS_MALLOC(n/8 * 3 * sizeof(cdata_t), 32); - float *fw = (float *)w; #ifdef HAVE_NEON VS temp0, temp1, temp2; for (j = 0; j < n/8; j += 4) { -- cgit v1.1 From 7ab9d5cc36798afd58accdf589a9004fa0ed49f0 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 13:01:29 +0200 Subject: Removal of 'transforms' broke dynamic code --- src/ffts_internal.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 2007242..3e788f8 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -133,6 +133,8 @@ struct _ffts_plan_t { size_t N; void *lastlut; + size_t *temporary_fix_as_dynamic_code_assumes_fixed_offset; + /** * Pointer to the dynamically generated function * that will execute the FFT -- cgit v1.1 From 46a03b4bad191d3f258660e4d09742e47dee9a4c Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:39:11 +0200 Subject: For the moment assume HAVE_VFP as final fallback --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 383c01f..8dbc7d1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -71,6 +71,8 @@ else() check_include_file(arm_neon.h HAVE_ARM_NEON_H) if(HAVE_ARM_NEON_H) add_definitions(-DHAVE_NEON) + else() + add_definitions(-DHAVE_VFP) endif(HAVE_ARM_NEON_H) endif(HAVE_XMMINTRIN_H) -- cgit v1.1 From fcd054b7cf24c30e912a2d5f615b6aa241d74ce2 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:41:12 +0200 Subject: Dereference pointer --- src/codegen_arm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen_arm.h b/src/codegen_arm.h index b739055..2948ec3 100644 --- a/src/codegen_arm.h +++ b/src/codegen_arm.h @@ -188,7 +188,7 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) { - insns_t *start = fp; + insns_t *start = *fp; *(*fp)++ = PUSH_LR(); *(*fp)++ = 0xed2d8b10; -- cgit v1.1 From 97c148a34daacd554ab2487b5ff2ed6b0a49de94 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:42:36 +0200 Subject: Remove redefinitions and fix naming --- src/codegen.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index 7635875..b70f011 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -528,9 +528,6 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N // r2 - ws // ADDI(&fp, 3, 1, 0); // put N into r3 for counter - int32_t pAddr = 0; - int32_t pN = 0; - int32_t pLUT = 0; count = 2; while(pps[0]) { @@ -542,11 +539,11 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN); } - if(p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT) - ADDI(&fp, 2, 2, p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8 - pLUT); + if(p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8 - pLUT) + ADDI(&fp, 2, 2, 
p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8 - pLUT); - if(pps[0] == 2*leafN) { + if(pps[0] == 2 * leaf_N) { *fp = BL(fp+2, x_4_addr); fp++; } else if(!pps[2]) { @@ -581,7 +578,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N pAddr = pps[1] * 4; pN = pps[0]; - pLUT = p->ws_is[__builtin_ctzl(pps[0]/leafN)-1]*8;//LUT_offset(pps[0], leafN); + pLUT = p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8;//LUT_offset(pps[0], leafN); // fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); count += 4; pps += 2; -- cgit v1.1 From 786c32e1d098229e3d6d2bf3ff1c0ab2d0b3c42e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:43:03 +0200 Subject: Remove duplicate code --- src/ffts.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 1098bda..52c302b 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -503,15 +503,8 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) p->i1++; } -#ifdef __arm__ -#ifdef HAVE_NEON - p->i0 /= 2; - p->i1 /= 2; -#endif -#else p->i0 /= 2; p->i1 /= 2; -#endif #ifdef DYNAMIC_DISABLED if (sign < 0) { @@ -541,6 +534,7 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) goto cleanup; } + /* generate code */ p->transform = ffts_generate_func_code(p, N, leaf_N, sign); if (!p->transform) { -- cgit v1.1 From 1f5219fb1bd5519544c92dcf3149b4524c6c728f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:58:59 +0200 Subject: Don't add 'vfp.s' if dynamic code is disabled --- CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8dbc7d1..0acf710 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,9 +146,6 @@ elseif(ENABLE_VFP) src/vfp.s ) endif(NOT DISABLE_DYNAMIC_CODE) - list(APPEND FFTS_SOURCES - src/vfp.s - ) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") add_definitions(-DHAVE_VFP) -- cgit v1.1 From 4468d365c34ddac8680e7d4d46b2bf5684377094 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 17:59:56 +0200 Subject: ARM compile shall use generic C if dynamic code is disabled --- src/ffts.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 52c302b..ba41208 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -56,7 +56,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endif #endif -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) static const FFTS_ALIGN(64) float w_data[16] = { 0.70710678118654757273731092936941f, 0.70710678118654746171500846685376f, @@ -241,7 +241,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) for (i = 0; i < n_luts; i++) { if (!i || hardcoded) { -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) if (N <= 32) { lut_size += n/4 * 2 * sizeof(cdata_t); } else { @@ -252,7 +252,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #endif n *= 2; } else { -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) lut_size += n/8 * 3 * sizeof(cdata_t); #else lut_size += n/8 * 3 * 2 * sizeof(cdata_t); @@ -303,7 +303,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w0[j][1] = W_im(n,j); } -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) if (N < 32) { // w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32); float *fw = (float *)w; @@ -381,7 +381,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w2[j][1] = W_im((float) n, (float) (j + (n/8))); } -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) #ifdef HAVE_NEON VS temp0, temp1, temp2; for (j = 0; j < n/8; j += 4) { @@ -443,7 +443,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) n *= 2; } -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) if (sign < 0) { p->oe_ws = (void*)(&w_data[4]); p->ee_ws = (void*)(w_data); @@ -514,7 +514,7 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) } #else /* determinate transform size */ -#ifdef __arm__ +#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) if (N < 8192) { p->transform_size = 8192; } else { -- cgit v1.1 From 90436adee6dff72c1e51b0528e57deb57f50fb28 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 10 Mar 2015 18:12:48 +0200 Subject: Don't use CMake option name --- src/ffts.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index ba41208..0696daa 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -56,7 +56,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endif #endif -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) static const FFTS_ALIGN(64) float w_data[16] = { 0.70710678118654757273731092936941f, 0.70710678118654746171500846685376f, @@ -241,7 +241,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) for (i = 0; i < n_luts; i++) { if (!i || hardcoded) { -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (N <= 32) { lut_size += n/4 * 2 * sizeof(cdata_t); } else { @@ -252,7 +252,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #endif n *= 2; } else { -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) lut_size += n/8 * 3 * sizeof(cdata_t); #else lut_size += n/8 * 3 * 2 * sizeof(cdata_t); @@ -303,7 +303,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w0[j][1] = W_im(n,j); } -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (N < 32) { // w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32); float *fw = (float *)w; @@ -381,7 +381,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w2[j][1] = W_im((float) n, (float) (j + (n/8))); } -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) #ifdef HAVE_NEON VS temp0, temp1, temp2; for (j = 0; j < n/8; j += 4) { @@ -443,7 +443,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) n *= 2; } -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (sign < 0) { p->oe_ws = (void*)(&w_data[4]); p->ee_ws = (void*)(w_data); @@ -514,7 +514,7 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) } #else /* determinate transform size */ -#if defined(__arm__) && !defined(DISABLE_DYNAMIC_CODE) +#if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (N < 8192) { p->transform_size = 8192; } else { -- cgit v1.1 From 9ba8731f20a6bf6771f9a9812268c8bf958134c6 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 11 Mar 2015 13:14:11 +0200 Subject: Now ARM VFP building works with "CMake -DENABLE_VFP=ON". Performance when "-DDYNAMIC_CODE_DISABLED=ON" is poor, but it can be improved with gcc switches. 
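The scalar path this configuration falls back to is the macros-alpha.h code from the earlier "Fix issue #29" commit, whose message notes the use of union and memcpy to avoid strict aliasing issues. A condensed sketch of that load/store pattern, assuming only stdint.h and string.h (the patch's own VST assigns through a V*; the memcpy store here is the stricter variant):

#include <stdint.h>
#include <string.h>

/* A four-float "vector" emulated as a union: the struct view holds two
 * interleaved complex values, the integer view supports VXOR-style
 * sign-flip operations without punning floats. */
typedef union {
    struct { float r1, i1, r2, i2; } r;
    uint32_t u[4];
} V;

/* Load via memcpy so no float* is ever dereferenced through a V*,
 * which -fstrict-aliasing would allow the compiler to miscompile. */
static V VLD(const void *s)
{
    V z;
    memcpy(&z, s, sizeof(z));
    return z;
}

static void VST(void *d, V s)
{
    memcpy(d, &s, sizeof(s));
}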
--- src/ffts.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ffts.c b/src/ffts.c index 0696daa..84a1145 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -503,8 +503,10 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) p->i1++; } +#if !defined(HAVE_VFP) || defined(DYNAMIC_DISABLED) p->i0 /= 2; p->i1 /= 2; +#endif #ifdef DYNAMIC_DISABLED if (sign < 0) { -- cgit v1.1 From 4bd30d719919afeda7ec1369b124f661121d69ef Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 11 Mar 2015 15:31:16 +0200 Subject: Added "-mfloat-abi=softfp" as default for ARM --- CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0acf710..537da41 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -135,10 +135,12 @@ if(ENABLE_NEON) else() list(APPEND FFTS_SOURCES src/neon.s - src/arch/neon.c ) endif(DISABLE_DYNAMIC_CODE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp") + add_definitions(-DHAVE_NEON) elseif(ENABLE_VFP) if(NOT DISABLE_DYNAMIC_CODE) @@ -148,6 +150,8 @@ elseif(ENABLE_VFP) endif(NOT DISABLE_DYNAMIC_CODE) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp") + add_definitions(-DHAVE_VFP) elseif(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) -- cgit v1.1 From 750d7537801fac85104b3c51af8ee56b8c5109b4 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 11 Mar 2015 16:21:49 +0200 Subject: Use FFTS_INLINE instead of __INLINE and make functions static --- src/macros-neon.h | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/src/macros-neon.h b/src/macros-neon.h index c8b5720..c015f47 100644 --- a/src/macros-neon.h +++ b/src/macros-neon.h @@ -29,8 +29,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __MACROS_NEON_H__ -#define __MACROS_NEON_H__ +#ifndef FFTS_MACROS_NEON_H +#define FFTS_MACROS_NEON_H #include "neon.h" #include @@ -47,9 +47,9 @@ typedef float32x4x2_t VS; #define VMUL vmulq_f32 #define VXOR(x,y) (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y)))) #define VST vst1q_f32 -#define VLD vld1q_f32 +#define VLD vld1q_f32 #define VST2 vst2q_f32 -#define VLD2 vld2q_f32 +#define VLD2 vld2q_f32 #define VSWAPPAIRS(x) (vrev64q_f32(x)) @@ -58,7 +58,7 @@ typedef float32x4x2_t VS; #define VBLEND(x,y) (vcombine_f32(vget_low_f32(x), vget_high_f32(y))) -__INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) { +static FFTS_INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) { data_t __attribute__ ((aligned(16))) d[4] = {f0, f1, f2, f3}; return VLD(d); } @@ -69,29 +69,26 @@ __INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) { #define FFTS_MALLOC(d,a) (valloc(d)) #define FFTS_FREE(d) (free(d)) -__INLINE void STORESPR(data_t * addr, VS p) { - +static FFTS_INLINE void STORESPR(data_t * addr, VS p) { vst1q_f32(addr, p.val[0]); vst1q_f32(addr + 4, p.val[1]); - } -__INLINE V IMULI(int inv, V a) { - if(inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); +static FFTS_INLINE V IMULI(int inv, V a) { + if (inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); else return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); } -__INLINE V IMUL(V d, V re, V im) { - re = VMUL(re, d); +static FFTS_INLINE V IMUL(V d, V re, V im) { + re = VMUL(re, d); im = VMUL(im, VSWAPPAIRS(d)); - return VSUB(re, im); + return VSUB(re, im); } -__INLINE V IMULJ(V d, V re, V im) { - re = VMUL(re, d); +static FFTS_INLINE V IMULJ(V d, V re, V im) { + re = VMUL(re, d); im = VMUL(im, VSWAPPAIRS(d)); return VADD(re, im); } -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_MACROS_NEON_H */ -- cgit v1.1 From c7f2b486a3e5a6b875c4703cedf3e3995f642491 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 11 Mar 2015 16:24:32 +0200 Subject: Add explicit '.fpu' directive --- src/neon.s | 1 + src/neon_static_f.s | 1 + src/neon_static_i.s | 1 + 3 files changed, 3 insertions(+) diff --git a/src/neon.s b/src/neon.s index 6995066..ec98250 100644 --- a/src/neon.s +++ b/src/neon.s @@ -30,6 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + .fpu neon .align 4 #ifdef __APPLE__ diff --git a/src/neon_static_f.s b/src/neon_static_f.s index 920d13c..bb0d717 100644 --- a/src/neon_static_f.s +++ b/src/neon_static_f.s @@ -30,6 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + .fpu neon .align 4 #ifdef __APPLE__ diff --git a/src/neon_static_i.s b/src/neon_static_i.s index cfa766c..5edc908 100644 --- a/src/neon_static_i.s +++ b/src/neon_static_i.s @@ -30,6 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ + .fpu neon .align 4 #ifdef __APPLE__ -- cgit v1.1 From c06f06846f178a8504d186864fc7e5be37cd2ed8 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 11 Mar 2015 18:28:38 +0200 Subject: Fix conflicting types for 'temp0' --- src/ffts.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ffts.c b/src/ffts.c index 84a1145..41c0b48 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -329,12 +329,14 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) //w = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); float *fw = (float *)w; #ifdef HAVE_NEON + { VS temp0, temp1, temp2; for (j=0; j Date: Wed, 11 Mar 2015 18:29:59 +0200 Subject: Automatically detect ARM FPU and float ABI --- CMakeLists.txt | 91 ++++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 73 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 537da41..2fa2a3f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,6 +32,8 @@ option(ENABLE_SHARED ) include(CheckIncludeFile) +include(CheckSymbolExists) +include(CMakePushCheckState) add_definitions(-DFFTS_CMAKE_GENERATED) @@ -62,19 +64,78 @@ if(HAVE_UNISTD_H) add_definitions(-DHAVE_UNISTD_H) endif(HAVE_UNISTD_H) -# check if the platform has support for SSE SIMD extension -check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) -if(HAVE_XMMINTRIN_H) - add_definitions(-DHAVE_SSE) -else() - # check if the platform has support NEON SIMD extension - check_include_file(arm_neon.h HAVE_ARM_NEON_H) - if(HAVE_ARM_NEON_H) - add_definitions(-DHAVE_NEON) +# Determinate if we are cross-compiling +if(NOT CMAKE_CROSSCOMPILING) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + # Determinate what floating-point hardware + # (or hardware emulation) is available + # + # Test compilation with -mfpu=neon + cmake_push_check_state() + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfpu=neon") + check_symbol_exists(exit stdlib.h NEON_AVAILABLE) + if(NOT NEON_AVAILABLE) + cmake_reset_check_state() + + # Test compilation with -mfpu=vfp + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfpu=vfp") + check_symbol_exists(exit stdlib.h VFP_AVAILABLE) + if(NOT VFP_AVAILABLE) + message(WARNING "FFTS is using 'soft' FPU") + else() + message("FFTS is using 'vfp' FPU") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") + + set(ENABLE_NEON 0) + set(ENABLE_VFP 1) + endif(NOT SOFTFP_AVAILABLE) + else() + message("FFTS is using 'neon' FPU") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") + + set(ENABLE_NEON 1) + set(ENABLE_VFP 0) + endif(NOT NEON_AVAILABLE) + + # Determinate float ABI if NEON or VFP is used + if(NEON_AVAILABLE OR VFP_AVAILABLE) + cmake_push_check_state() + + # Test compilation with -mfloat-abi=hard + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=hardfp") + check_symbol_exists(exit stdlib.h HARDFP_AVAILABLE) + if(NOT HARDFP_AVAILABLE) + cmake_reset_check_state() + + # Test compilation with -mfloat-abi=hard + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=softfp") + check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE) + if(NOT SOFTFP_AVAILABLE) + # Most likely development libraries are missing + message(WARNING "FFTS is using 'soft' float ABI") + else() + message("FFTS is using 'softfp' float ABI") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp") + endif(NOT SOFTFP_AVAILABLE) + else() + message(WARNING "FFTS is using 'hard' float ABI") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard") + endif(NOT HARDFP_AVAILABLE) + + cmake_pop_check_state() + endif(NEON_AVAILABLE OR VFP_AVAILABLE) + + 
+        cmake_pop_check_state()
     else()
-        add_definitions(-DHAVE_VFP)
-    endif(HAVE_ARM_NEON_H)
-endif(HAVE_XMMINTRIN_H)
+        # check if the platform has support for SSE SIMD extension
+        check_include_file(xmmintrin.h HAVE_XMMINTRIN_H)
+        if(HAVE_XMMINTRIN_H)
+            add_definitions(-DHAVE_SSE)
+        endif(HAVE_XMMINTRIN_H)
+    endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
+else()
+    # TODO: check whether the detection code above can also be used when
+    # cross-compiling
+endif(NOT CMAKE_CROSSCOMPILING)

 # compiler settings
 if(MSVC)
@@ -138,9 +199,6 @@ if(ENABLE_NEON)
     )
     endif(DISABLE_DYNAMIC_CODE)

-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon")
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
-
     add_definitions(-DHAVE_NEON)
 elseif(ENABLE_VFP)
     if(NOT DISABLE_DYNAMIC_CODE)
@@ -149,9 +207,6 @@ elseif(ENABLE_VFP)
     )
     endif(NOT DISABLE_DYNAMIC_CODE)

-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp")
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
-
     add_definitions(-DHAVE_VFP)
 elseif(HAVE_XMMINTRIN_H)
     add_definitions(-DHAVE_SSE)
-- cgit v1.1

From 72fc3742112c384a2af02d0b51c98cedccc7165f Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Wed, 11 Mar 2015 18:33:14 +0200
Subject: Fix conflicting types

---
 src/ffts.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/ffts.c b/src/ffts.c
index 41c0b48..4474a9f 100644
--- a/src/ffts.c
+++ b/src/ffts.c
@@ -385,6 +385,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign)
 #if defined(__arm__) && !defined(DYNAMIC_DISABLED)
 #ifdef HAVE_NEON
+		{
 		VS temp0, temp1, temp2;
 		for (j = 0; j < n/8; j += 4) {
 			temp0 = VLD2(fw0 + j*2);
@@ -397,6 +398,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign)
 			temp2.val[1] = VXOR(temp2.val[1], neg);
 			STORESPR(fw + j*2*3 + 16, temp2);
 		}
+		}
 #else
 		for (j = 0; j < n/8; j += 1) {
 			fw[j*6] = fw0[j*2];
-- cgit v1.1

From 69b7770ec32dbda9d4fcca198e830f84256640fc Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Wed, 11 Mar 2015 18:50:27 +0200
Subject: Try to execute detection quietly without messages

---
 CMakeLists.txt | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2fa2a3f..12b3bf8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -70,8 +70,12 @@ if(NOT CMAKE_CROSSCOMPILING)
         # Determine what floating-point hardware
         # (or hardware emulation) is available
         #
-        # Test compilation with -mfpu=neon
         cmake_push_check_state()
+
+        # Try to execute quietly without messages
+        set(CMAKE_REQUIRED_QUIET 1)
+
+        # Test compilation with -mfpu=neon
         set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfpu=neon")
         check_symbol_exists(exit stdlib.h NEON_AVAILABLE)
         if(NOT NEON_AVAILABLE)
-- cgit v1.1

From 4cfaf45051e43c00ea9ac5ac996da246817e4c10 Mon Sep 17 00:00:00 2001
From: Jukka Ojanen
Date: Thu, 12 Mar 2015 13:06:57 +0200
Subject: Initial steps to support double precision. Replace data_t with float, and cdata_t with ffts_cpx_32f.
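This commit replaces the old cdata_t/data_t aliases with explicit element
types: previously cdata_t was a C99 'complex float' when the compiler provided
one and a two-element float array otherwise (see the types.h hunk below),
while the new ffts_cpx_32f and ffts_cpx_64f types are always plain two-element
arrays. That keeps the headers buildable with compilers that lack C99 _Complex
support, at the cost of indexing the real and imaginary parts explicitly.
A minimal sketch of the convention (illustrative code, not part of the patch;
the helper name is made up):

    #include <stdio.h>

    typedef float  ffts_cpx_32f[2];  /* [0] = real part, [1] = imaginary part */
    typedef double ffts_cpx_64f[2];

    /* Illustrative helper: complex multiply, r = a * b.
     * The array parameters decay to pointers; r must not alias a or b. */
    static void ffts_cpx_mul_64f(ffts_cpx_64f r, const ffts_cpx_64f a,
                                 const ffts_cpx_64f b)
    {
        r[0] = a[0] * b[0] - a[1] * b[1];
        r[1] = a[0] * b[1] + a[1] * b[0];
    }

    int main(void)
    {
        ffts_cpx_64f a = { 1.0, 2.0 }, b = { 3.0, 4.0 }, r;
        ffts_cpx_mul_64f(r, a, b);
        printf("(%g, %g)\n", r[0], r[1]); /* (1+2i)(3+4i) = -5+10i */
        return 0;
    }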
--- src/ffts.c | 44 +++---- src/ffts_small.c | 374 ++++++++++++++++++++++++++++++++++++++++--------------- src/ffts_small.h | 85 +++++++++++-- src/macros.h | 45 +++++-- src/types.h | 10 +- 5 files changed, 413 insertions(+), 145 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 4474a9f..94d6f1b 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -207,7 +207,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) int hardcoded; size_t lut_size; size_t n_luts; - cdata_t *w; + ffts_cpx_32f *w; size_t i; size_t n; @@ -243,19 +243,19 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) if (!i || hardcoded) { #if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (N <= 32) { - lut_size += n/4 * 2 * sizeof(cdata_t); + lut_size += n/4 * 2 * sizeof(ffts_cpx_32f); } else { - lut_size += n/4 * sizeof(cdata_t); + lut_size += n/4 * sizeof(ffts_cpx_32f); } #else - lut_size += n/4 * 2 * sizeof(cdata_t); + lut_size += n/4 * 2 * sizeof(ffts_cpx_32f); #endif n *= 2; } else { #if defined(__arm__) && !defined(DYNAMIC_DISABLED) - lut_size += n/8 * 3 * sizeof(cdata_t); + lut_size += n/8 * 3 * sizeof(ffts_cpx_32f); #else - lut_size += n/8 * 3 * 2 * sizeof(cdata_t); + lut_size += n/8 * 3 * 2 * sizeof(ffts_cpx_32f); #endif } n *= 2; @@ -289,11 +289,11 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #endif for (i = 0; i < n_luts; i++) { - p->ws_is[i] = w - (cdata_t *)p->ws; + p->ws_is[i] = w - (ffts_cpx_32f*) p->ws; //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]); if(!i || hardcoded) { - cdata_t *w0 = FFTS_MALLOC(n/4 * sizeof(cdata_t), 32); + ffts_cpx_32f *w0 = FFTS_MALLOC(n/4 * sizeof(ffts_cpx_32f), 32); float *fw0 = (float*) w0; float *fw = (float *)w; @@ -305,7 +305,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #if defined(__arm__) && !defined(DYNAMIC_DISABLED) if (N < 32) { - // w = FFTS_MALLOC(n/4 * 2 * sizeof(cdata_t), 32); + // w = FFTS_MALLOC(n/4 * 2 * sizeof(ffts_cpx_32f), 32); float *fw = (float *)w; V temp0, temp1, temp2; for (j=0; jtransform = &ffts_firstpass_2; + p->transform = &ffts_small_2_32f; break; case 4: if (sign == -1) { - p->transform = &ffts_firstpass_4_f; + p->transform = &ffts_small_forward4_32f; } else if (sign == 1) { - p->transform = &ffts_firstpass_4_b; + p->transform = &ffts_small_backward4_32f; } break; case 8: if (sign == -1) { - p->transform = &ffts_firstpass_8_f; + p->transform = &ffts_small_forward8_32f; } else if (sign == 1) { - p->transform = &ffts_firstpass_8_b; + p->transform = &ffts_small_backward8_32f; } break; case 16: default: if (sign == -1) { - p->transform = &ffts_firstpass_16_f; + p->transform = &ffts_small_forward16_32f; } else { - p->transform = &ffts_firstpass_16_b; + p->transform = &ffts_small_backward16_32f; } break; } diff --git a/src/ffts_small.c b/src/ffts_small.c index ccc3ab0..34be7af 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -1,104 +1,140 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2013, Michael J. Cree - Copyright (c) 2012, 2013, Anthony M. Blake - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2013, Michael J. Cree +Copyright (c) 2012, 2013, Anthony M. Blake + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ffts_small.h" + #include "ffts_internal.h" #include "macros.h" -void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - float *LUT8 = (float*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, r0, r1; - L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); -} + /* unreferenced parameter */ + (void) p; -void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out) -{ - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - float *LUT8 = (float*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[2]; + t1[1] = din[3]; - L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); + r0[0] = t0[0] + t1[0]; + r0[1] = t0[1] + t1[1]; + r1[0] = t0[0] - t1[0]; + r1[1] = t0[1] - t1[1]; + + dout[0] = r0[0]; + dout[1] = r0[1]; + dout[2] = r1[0]; + dout[3] = r1[1]; } -void ffts_firstpass_8_f(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - V r0_1, r2_3, r4_5, r6_7; - float *LUT8 = (float*) p->ws + p->ws_is[0]; + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, r0, r1; - L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[2]; + t1[1] = din[3]; + + r0[0] = t0[0] + t1[0]; + r0[1] = t0[1] + t1[1]; + r1[0] = t0[0] - t1[0]; + r1[1] = t0[1] - t1[1]; + + dout[0] = r0[0]; + dout[1] = r0[1]; + dout[2] = r1[0]; + dout[3] = r1[1]; } -void ffts_firstpass_8_b(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - V r0_1, r2_3, r4_5, r6_7; - float *LUT8 = (float*) p->ws + p->ws_is[0]; + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; - L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = 
din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] + t7[1]; + dout[3] = t5[1] - t7[0]; + dout[6] = t5[0] - t7[1]; + dout[7] = t5[1] + t7[0]; } -void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - cdata_t t0, t1, t2, t3, t4, t5, t6, t7; + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; - /* unreferenced parameter */ - (void) p; + /* unreferenced parameter */ + (void) p; t0[0] = din[0]; t0[1] = din[1]; @@ -128,14 +164,15 @@ void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out) dout[7] = t5[1] + t7[0]; } -void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - cdata_t t0, t1, t2, t3, t4, t5, t6, t7; + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; - /* unreferenced parameter */ - (void) p; + /* unreferenced parameter */ + (void) p; t0[0] = din[0]; t0[1] = din[1]; @@ -165,27 +202,168 @@ void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out) dout[7] = t5[1] - t7[0]; } -void ffts_firstpass_2(ffts_plan_t *p, const void *in, void *out) +void +ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out) { - const data_t *din = (const data_t*) in; - data_t *dout = (data_t*) out; - cdata_t t0, t1, r0, r1; + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; - /* unreferenced parameter */ - (void) p; + /* unreferenced parameter */ + (void) p; t0[0] = din[0]; t0[1] = din[1]; - t1[0] = din[2]; - t1[1] = din[3]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; - r0[0] = t0[0] + t1[0]; - r0[1] = t0[1] + t1[1]; - r1[0] = t0[0] - t1[0]; - r1[1] = t0[1] - t1[1]; + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; - dout[0] = r0[0]; - dout[1] = r0[1]; - dout[2] = r1[0]; - dout[3] = r1[1]; + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] - t7[1]; + dout[3] = t5[1] + t7[0]; + dout[6] = t5[0] + t7[1]; + dout[7] = t5[1] - t7[0]; +} + +void +ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + V r0_1, r2_3, r4_5, r6_7; + float *LUT8 = (float*) p->ws + p->ws_is[0]; + + L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +} + +void +ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + V r0_1, 
r2_3, r4_5, r6_7; + double *LUT8 = (double*) p->ws + p->ws_is[0]; + +#if MACROS_READY + L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +#endif +} + +void +ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + V r0_1, r2_3, r4_5, r6_7; + float *LUT8 = (float*) p->ws + p->ws_is[0]; + + L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +} + +void +ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + V r0_1, r2_3, r4_5, r6_7; + double *LUT8 = (double*) p->ws + p->ws_is[0]; + +#if MACROS_READY + L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +#endif +} + +void +ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + float *LUT8 = (float*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +} + +void +ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + double *LUT8 = (double*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + +#ifdef MACROS_READY + L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +#endif +} + +void +ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + float *LUT8 = (float*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +} + +void +ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + double *LUT8 
= (double*) p->ws; + V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + +#ifdef MACROS_READY + L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +#endif } \ No newline at end of file diff --git a/src/ffts_small.h b/src/ffts_small.h index 5ae48cc..249dcc9 100644 --- a/src/ffts_small.h +++ b/src/ffts_small.h @@ -1,14 +1,85 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2013, Michael J. Cree +Copyright (c) 2012, 2013, Anthony M. Blake + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + #ifndef FFTS_SMALL_H #define FFTS_SMALL_H +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + #include "ffts.h" -void ffts_firstpass_16_f(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_16_b(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_8_f(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_8_b(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_4_f(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_4_b(ffts_plan_t *p, const void *in, void *out); -void ffts_firstpass_2(ffts_plan_t *p, const void *in, void *out); +void +ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out); #endif /* FFTS_SMALL_H */ diff --git a/src/macros.h b/src/macros.h index b4a6a5a..fc53ae4 100644 --- a/src/macros.h +++ b/src/macros.h @@ -48,7 +48,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "macros-alpha.h" #endif -static FFTS_INLINE void TX2(V *a, V *b) +static FFTS_INLINE void +TX2(V *a, V *b) { V TX2_t0 = VUNPACKLO(*a, *b); V TX2_t1 = VUNPACKHI(*a, *b); @@ -56,7 +57,8 @@ static FFTS_INLINE void TX2(V *a, V *b) *b = TX2_t1; } -static FFTS_INLINE void K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void +K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) { V uk, uk2, zk_p, zk_n, zk, zk_d; @@ -75,9 +77,16 @@ static FFTS_INLINE void K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) *r1 = VSUB(uk2, zk_d); } -static FFTS_INLINE void L_2_4(int inv, const data_t* FFTS_RESTRICT i0, const data_t* FFTS_RESTRICT i1, - const data_t* FFTS_RESTRICT i2, const data_t* FFTS_RESTRICT i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void +L_2_4(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V *r0, + V *r1, + V *r2, + V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; @@ -105,9 +114,16 @@ static FFTS_INLINE void L_2_4(int inv, const data_t* FFTS_RESTRICT i0, const dat *r2 = VUNPACKHI(t2, t3); } -static FFTS_INLINE void L_4_4(int inv, const data_t* FFTS_RESTRICT i0, const data_t* FFTS_RESTRICT i1, - const data_t* FFTS_RESTRICT i2, const data_t* FFTS_RESTRICT i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void +L_4_4(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V *r0, + V *r1, + V *r2, + V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; @@ -136,9 +152,16 @@ static FFTS_INLINE void L_4_4(int inv, const data_t* FFTS_RESTRICT i0, const dat *r3 = t3; } -static FFTS_INLINE void L_4_2(int inv, const data_t * FFTS_RESTRICT i0, const data_t * FFTS_RESTRICT i1, - const data_t * FFTS_RESTRICT i2, const data_t * FFTS_RESTRICT i3, - V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void +L_4_2(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V *r0, + V *r1, + V *r2, + V *r3) { V t0, t1, t2, t3, t4, t5, t6, t7; diff --git a/src/types.h b/src/types.h index 749d387..f8997ce 100644 --- a/src/types.h +++ b/src/types.h @@ -38,12 +38,8 @@ #pragma once #endif -#if defined(_Complex_I) && defined(complex) && defined(I) -typedef complex float cdata_t; -#else -typedef float cdata_t[2]; -#endif - -typedef float data_t; +/* Define complex number as two element array */ +typedef float ffts_cpx_32f[2]; +typedef double ffts_cpx_64f[2]; #endif /* FFTS_TYPES_H */ -- cgit v1.1 From a87f60f2462bab629d6891e51e7f7fc2956ff2c8 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 12 Mar 2015 13:07:44 +0200 Subject: Remove unused variable 'i' from 'ffts_generate_func_code' --- src/codegen.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index b70f011..fc407cb 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -118,7 +118,6 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N uint32_t loop_count; int count; - int i; ptrdiff_t len; size_t *ps; @@ -408,10 +407,8 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N } fp += (neon_eo - neon_oo) / 4; } - } - if(p->i1) { ADDI(&fp, 2, 3, 0); ADDI(&fp, 3, 7, 0); @@ -452,7 +449,6 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N fp[57] ^= 0x00200000; } fp += (neon_oo - neon_ee) / 4; - } #else ADDI(&fp, 2, 7, 0); -- cgit v1.1 From 
18d144f510af8e6dc5f5383723666bdc34a449f1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 12 Mar 2015 13:35:37 +0200 Subject: Remove unused neon_float.h header --- src/neon_float.h | 1127 ------------------------------------------------------ 1 file changed, 1127 deletions(-) delete mode 100644 src/neon_float.h diff --git a/src/neon_float.h b/src/neon_float.h deleted file mode 100644 index 9aeab1c..0000000 --- a/src/neon_float.h +++ /dev/null @@ -1,1127 +0,0 @@ -/* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -*/ -#ifndef __NEON_FLOAT_H__ -#define __NEON_FLOAT_H__ - -#include - -//#define VL 4 -#define __INLINE static inline __attribute__((always_inline)) - -typedef float32x4_t V; - -typedef float32x4x2_t VS; - -#if defined(complex) - typedef complex float cdata_t; -#else - typedef float cdata_t[2]; -#endif - typedef float data_t; - -#define ADD vaddq_f32 -#define SUB vsubq_f32 -#define MUL vmulq_f32 -#define VADD vaddq_f32 -#define VSUB vsubq_f32 -#define VMUL vmulq_f32 -#define VXOR(x,y) (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y)))) -#define VST vst1q_f32 -#define VLD vld1q_f32 -#define VST2 vst2q_f32 -#define VLD2 vld2q_f32 - -#define VSWAPPAIRS(x) (vrev64q_f32(x)) - -#define VUNPACKHI(a,b) (vcombine_f32(vget_high_f32(a), vget_high_f32(b))) -#define VUNPACKLO(a,b) (vcombine_f32(vget_low_f32(a), vget_low_f32(b))) - -#define VBLEND(x,y) (vcombine_f32(vget_low_f32(x), vget_high_f32(y))) - -__INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) { - data_t __attribute__ ((aligned(16))) d[4] = {f0, f1, f2, f3}; - return VLD(d); -} - -#define VDUPRE(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0)) -#define VDUPIM(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1)) - -#define FFTS_MALLOC(d,a) (valloc(d)) -#define FFTS_FREE(d) (free(d)) -__INLINE void FMA(V *Rd, V Rn, V Rm) { - *Rd = vmlaq_f32(*Rd, Rn, Rm); -// __asm__ ("vmla.f32 %q0,%q1,%q2\n\t" -// : "+w" (*Rd) -// : "w" (Rn), "w" (Rm) -// //: "0" -// ); - -} -__INLINE void FMS(V *Rd, V Rn, V Rm) { - *Rd = vmlsq_f32(*Rd, Rn, Rm); -// __asm__ ("vmls.f32 %q0,%q1,%q2\n\t" -// : "+w" (*Rd) -// : "w" (Rn), "w" (Rm) -// // : "0" -// ); -} - -__INLINE VS VSMUL(VS *d, VS *w) { - VS t; - t.val[0] = vmulq_f32(d->val[0], w->val[0]); - t.val[1] = vmulq_f32(d->val[0], w->val[1]); -// t.val[0] = vmlsq_f32(t.val[0], d->val[1], w->val[1]); -// t.val[1] = vmlaq_f32(t.val[1], d->val[1], w->val[0]); - FMS(&t.val[0], d->val[1], w->val[1]); - FMA(&t.val[1], d->val[1], w->val[0]); - return t; -} -__INLINE VS VSMULJ(VS *d, VS *w) { - VS t; - t.val[0] = vmulq_f32(d->val[0], w->val[0]); - t.val[1] = vmulq_f32(d->val[1], w->val[0]); -// t.val[0] = vmlaq_f32(t.val[0], d->val[1], w->val[1]); -// t.val[1] = vmlsq_f32(t.val[1], d->val[0], w->val[1]); - FMA(&t.val[0], d->val[1], w->val[1]); - FMS(&t.val[1], d->val[0], w->val[1]); - return t; -} -__INLINE VS VSADD(VS *a, VS *b) { - VS r; - r.val[0] = vaddq_f32(a->val[0], b->val[0]); - r.val[1] = vaddq_f32(a->val[1], b->val[1]); - return r; -} -__INLINE VS VSSUB(VS *a, VS *b) { - VS r; - r.val[0] = vsubq_f32(a->val[0], b->val[0]); - r.val[1] = vsubq_f32(a->val[1], b->val[1]); - return r; -} -__INLINE VS VSSUB_MULI(VS *a, VS *b) { - VS r; - r.val[0] = vaddq_f32(a->val[0], b->val[1]); - r.val[1] = vsubq_f32(a->val[1], b->val[0]); - return r; -} -__INLINE VS VSADD_MULI(VS *a, VS *b) { - VS r; - r.val[0] = vsubq_f32(a->val[0], b->val[1]); - r.val[1] = vaddq_f32(a->val[1], b->val[0]); - return r; -} - -__INLINE void VSK_N(VS w, VS *r0, VS *r1, VS *r2, VS *r3) { - VS uk, uk2, zk_p, zk_n, zk, zk_d; - uk = *r0; uk2 = *r1; - zk_p = VSMUL(r2, &w); - zk_n = VSMULJ(r3, &w); - - zk = VSADD(&zk_p, &zk_n); - zk_d = VSSUB(&zk_p, &zk_n); - - *r2 = VSSUB(&uk, &zk); - *r0 = VSADD(&uk, &zk); - *r3 = VSADD_MULI(&uk2, &zk_d); - *r1 = VSSUB_MULI(&uk2, &zk_d); -} - - -__INLINE float32x2x2_t HVS_ADD(float32x2x2_t a, float32x2x2_t b) { - float32x2x2_t rval; - rval.val[0] = vadd_f32(a.val[0], b.val[0]); - rval.val[1] = vadd_f32(a.val[1], 
b.val[1]); - return rval; -} -__INLINE float32x2x2_t HVS_SUB(float32x2x2_t a, float32x2x2_t b) { - float32x2x2_t rval; - rval.val[0] = vsub_f32(a.val[0], b.val[0]); - rval.val[1] = vsub_f32(a.val[1], b.val[1]); - return rval; -} -__INLINE float32x2x2_t HVS_SUB_MULI(float32x2x2_t a, float32x2x2_t b) { - float32x2x2_t rval; - rval.val[0] = vadd_f32(a.val[0], b.val[1]); - rval.val[1] = vsub_f32(a.val[1], b.val[0]); - return rval; -} -__INLINE float32x2x2_t HVS_ADD_MULI(float32x2x2_t a, float32x2x2_t b) { - float32x2x2_t rval; - rval.val[0] = vsub_f32(a.val[0], b.val[1]); - rval.val[1] = vadd_f32(a.val[1], b.val[0]); - return rval; -} -__INLINE float32x2x2_t HVS_MUL(float32x2x2_t d, float32x2x2_t w) { - float32x2x2_t t; - t.val[0] = vmul_f32(d.val[0], w.val[0]); - t.val[1] = vmul_f32(d.val[0], w.val[1]); - t.val[0] = vmls_f32(t.val[0], d.val[1], w.val[1]); - t.val[1] = vmla_f32(t.val[1], d.val[1], w.val[0]); - return t; -} -__INLINE float32x2x2_t HVS_MULJ(float32x2x2_t d, float32x2x2_t w) { - float32x2x2_t t; - t.val[0] = vmul_f32(d.val[0], w.val[0]); - t.val[1] = vmul_f32(d.val[1], w.val[0]); - t.val[0] = vmla_f32(t.val[0], d.val[1], w.val[1]); - t.val[1] = vmls_f32(t.val[1], d.val[0], w.val[1]); - return t; -} -__INLINE void HVS_K_N(float32x2x2_t w, float32x2x2_t *r0, float32x2x2_t *r1, float32x2x2_t *r2, float32x2x2_t *r3) { - float32x2x2_t uk, uk2, zk_p, zk_n, zk, zk_d; - uk = *r0; uk2 = *r1; - zk_p = HVS_MUL(*r2, w); - zk_n = HVS_MULJ(*r3, w); - zk = HVS_ADD(zk_p, zk_n); - zk_d = HVS_SUB(zk_p, zk_n); - - *r2 = HVS_SUB(uk, zk); - *r0 = HVS_ADD(uk, zk); - *r3 = HVS_ADD_MULI(uk2, zk_d); - *r1 = HVS_SUB_MULI(uk2, zk_d); -} - -typedef union { - float32x4_t f32x4; - float32x2x2_t f32x2x2; -} float_mixed_t; - -__INLINE void VSWP(float32x2x2_t *a, float32x2x2_t *b) { -//float32x2_t tmp = a->val[1]; -//a->val[1] = b->val[0]; -//b->val[0] = tmp; - __asm__ ("vswp %0,%1\n\t" - : "+w" (a->val[1]), "+w" (b->val[0]) - : - ); -} - -static const __attribute__ ((aligned(16))) float ee_w_data[4] = {0.70710678118654757273731092936941,0.70710678118654746171500846685376, - -0.70710678118654757273731092936941,-0.70710678118654746171500846685376}; -__INLINE void LEAF_EE8_SPLIT(size_t ** restrict is, const data_t * restrict in, size_t ** restrict out_offsets, data_t * restrict out) { - data_t *out0 = out + (*out_offsets)[0]; - data_t *out1 = out + (*out_offsets)[1]; - *out_offsets += 2; - - float32x2x2_t r0, r1, r2, r3, r4, r5, r6, r7; - float32x2x2_t t0, t1, t2, t3, t4, t5, t6, t7; - - t0 = vld2_f32(in + (*is)[0]); t1 = vld2_f32(in + (*is)[1]); t2 = vld2_f32(in + (*is)[2]); t3 = vld2_f32(in + (*is)[3]); - - t4 = HVS_ADD (t0, t1); - t5 = HVS_SUB (t0, t1); - t6 = HVS_ADD (t2, t3); - t7 = HVS_SUB (t2, t3); - r0 = HVS_ADD (t4, t6); - r2 = HVS_SUB (t4, t6); - r1 = HVS_SUB_MULI(t5, t7); - r3 = HVS_ADD_MULI(t5, t7); - - t0 = vld2_f32(in + (*is)[4]); t1 = vld2_f32(in + (*is)[5]); t2 = vld2_f32(in + (*is)[6]); t3 = vld2_f32(in + (*is)[7]); - r4 = HVS_ADD (t0, t1); - r5 = HVS_SUB (t0, t1); - r6 = HVS_ADD (t2, t3); - r7 = HVS_SUB (t2, t3); - t0 = r0; t1 = r2; - t2 = HVS_ADD(r4, r6); - t3 = HVS_SUB(r4, r6); - r0 = HVS_ADD(t0, t2); - r4 = HVS_SUB(t0, t2); - r2 = HVS_SUB_MULI(t1, t3); - r6 = HVS_ADD_MULI(t1, t3); - - float32x4_t w = vld1q_f32(ee_w_data); - float32x2x2_t ww; - ww.val[0] = vget_low_f32(w); - ww.val[1] = vget_high_f32(w); - - HVS_K_N(ww,&r1,&r3,&r5,&r7); - -//vst2_f32(out0, r0); -//vst2_f32(out0+4, r2); -//vst2_f32(out0+8, r4); -//vst2_f32(out0+12, r6); - -//vst2_f32(out1, r1); -//vst2_f32(out1+4, r3); 
-//vst2_f32(out1+8, r5); -//vst2_f32(out1+12, r7); - - float32x2x2_t tt0, tt1, tt2, tt3, tt4, tt5, tt6, tt7; - - tt0 = vtrn_f32(r0.val[0], r0.val[1]); - tt1 = vtrn_f32(r1.val[0], r1.val[1]); - tt2 = vtrn_f32(r2.val[0], r2.val[1]); - tt3 = vtrn_f32(r3.val[0], r3.val[1]); - tt4 = vtrn_f32(r4.val[0], r4.val[1]); - tt5 = vtrn_f32(r5.val[0], r5.val[1]); - tt6 = vtrn_f32(r6.val[0], r6.val[1]); - tt7 = vtrn_f32(r7.val[0], r7.val[1]); - -//VSWP(&tt0.f32x2x2, &tt1.f32x2x2); -//VSWP(&tt2.f32x2x2, &tt3.f32x2x2); -//VSWP(&tt4.f32x2x2, &tt5.f32x2x2); -//VSWP(&tt6.f32x2x2, &tt7.f32x2x2); - - float32x4_t z0, z1, z2, z3, z4, z5, z6, z7; - - z0 = vcombine_f32(tt0.val[0], tt1.val[0]); - z1 = vcombine_f32(tt0.val[1], tt1.val[1]); - z2 = vcombine_f32(tt2.val[0], tt3.val[0]); - z3 = vcombine_f32(tt2.val[1], tt3.val[1]); - z4 = vcombine_f32(tt4.val[0], tt5.val[0]); - z5 = vcombine_f32(tt4.val[1], tt5.val[1]); - z6 = vcombine_f32(tt6.val[0], tt7.val[0]); - z7 = vcombine_f32(tt6.val[1], tt7.val[1]); - - - vst1q_f32(out0, z0); - vst1q_f32(out0+4, z2); - vst1q_f32(out0+8, z4); - vst1q_f32(out0+12, z6); - - vst1q_f32(out1, z1); - vst1q_f32(out1+4, z3); - vst1q_f32(out1+8, z5); - vst1q_f32(out1+12, z7); -/* - vst1_f32(out0, tt0.val[0]); - vst1_f32(out0+2, tt1.val[0]); - vst1_f32(out0+4, tt2.val[0]); - vst1_f32(out0+6, tt3.val[0]); - vst1_f32(out0+8, tt4.val[0]); - vst1_f32(out0+10, tt5.val[0]); - vst1_f32(out0+12, tt6.val[0]); - vst1_f32(out0+14, tt7.val[0]); - - vst1_f32(out1, tt0.val[1]); - vst1_f32(out1+2, tt1.val[1]); - vst1_f32(out1+4, tt2.val[1]); - vst1_f32(out1+6, tt3.val[1]); - vst1_f32(out1+8, tt4.val[1]); - vst1_f32(out1+10, tt5.val[1]); - vst1_f32(out1+12, tt6.val[1]); - vst1_f32(out1+14, tt7.val[1]); - */ -/* - float32x4_t rr0 = vcombine_f32(r0.val[0], r0.val[1]); - float32x4_t rr1 = vcombine_f32(r1.val[0], r1.val[1]); - float32x4_t rr2 = vcombine_f32(r2.val[0], r2.val[1]); - float32x4_t rr3 = vcombine_f32(r3.val[0], r3.val[1]); - - float32x4x2_t tmp0, tmp1, tmp2, tmp3; - tmp0 = vtrnq_f32(rr0, rr2); - tmp1 = vtrnq_f32(rr1, rr3); - - - float32x2x2_t v0, v1, v2, v3; - v0.val[0] = vget_low_f32(tmp0.val[0]); - v0.val[1] = vget_high_f32(tmp0.val[0]); - v1.val[0] = vget_low_f32(tmp0.val[1]); - v1.val[1] = vget_high_f32(tmp0.val[1]); - v2.val[0] = vget_low_f32(tmp1.val[0]); - v2.val[1] = vget_high_f32(tmp1.val[0]); - v3.val[0] = vget_low_f32(tmp1.val[1]); - v3.val[1] = vget_high_f32(tmp1.val[1]); - - tmp2.val[0] = tmp0.val[0]; - tmp2.val[1] = tmp1.val[0]; - tmp3.val[0] = tmp0.val[1]; - tmp3.val[1] = tmp1.val[1]; - -//vst2q_f32(out0 , tmp2); -//vst2q_f32(out1 , tmp3); - vst2_f32(out0, v0); - vst2_f32(out0+4, v1); - vst2_f32(out1, v2); - vst2_f32(out1+4, v3); - - float32x4_t rr4 = vcombine_f32(r4.val[0], r4.val[1]); - float32x4_t rr5 = vcombine_f32(r5.val[0], r5.val[1]); - float32x4_t rr6 = vcombine_f32(r6.val[0], r6.val[1]); - float32x4_t rr7 = vcombine_f32(r7.val[0], r7.val[1]); - - tmp0 = vtrnq_f32(rr4, rr6); - tmp1 = vtrnq_f32(rr5, rr7); - - tmp2.val[0] = tmp0.val[0]; - tmp2.val[1] = tmp1.val[0]; - tmp3.val[0] = tmp0.val[1]; - tmp3.val[1] = tmp1.val[1]; - v0.val[0] = vget_low_f32(tmp0.val[0]); - v0.val[1] = vget_high_f32(tmp0.val[0]); - v1.val[0] = vget_low_f32(tmp0.val[1]); - v1.val[1] = vget_high_f32(tmp0.val[1]); - v2.val[0] = vget_low_f32(tmp1.val[0]); - v2.val[1] = vget_high_f32(tmp1.val[0]); - v3.val[0] = vget_low_f32(tmp1.val[1]); - v3.val[1] = vget_high_f32(tmp1.val[1]); - vst2_f32(out0+8, v0); - vst2_f32(out0+12, v1); - vst2_f32(out1+8, v1); - vst2_f32(out1+12, v3); - -//vst2q_f32(out0 + 8, tmp2); 
-//vst2q_f32(out1 + 8, tmp3); -//vst1q_f32(out0+8, tmp0.val[0]); -//vst1q_f32(out0+12,tmp0.val[1]); -//vst1q_f32(out1+8, tmp1.val[0]); -//vst1q_f32(out1+12,tmp1.val[1]); - */ - *is += 8; -} - -__INLINE void STORESPR(data_t * addr, VS p) { - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t" - : - : "r" (addr), "w" (p.val[0]), "w" (p.val[1]) - : "memory"); -} -__INLINE void STORESPRI(data_t * restrict * addr, V p0, V p1) { - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); -} -__INLINE void STORESPRI0(data_t * restrict *addr, VS r) { - register V p0 __asm__ ("q0") = r.val[0]; - register V p1 __asm__ ("q1") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRI1(data_t **addr, VS r) { - register V p0 __asm__ ("q2") = r.val[0]; - register V p1 __asm__ ("q3") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRI2(data_t **addr, VS r) { - register V p0 __asm__ ("q4") = r.val[0]; - register V p1 __asm__ ("q5") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRI3(data_t **addr, VS r) { - register V p0 __asm__ ("q6") = r.val[0]; - register V p1 __asm__ ("q7") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRIT0(data_t * restrict *addr, VS r) { - register V p0 __asm__ ("q0") = r.val[0]; - register V p1 __asm__ ("q1") = r.val[1]; - __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRIT1(data_t **addr, VS r) { - register V p0 __asm__ ("q2") = r.val[0]; - register V p1 __asm__ ("q3") = r.val[1]; - __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRIT2(data_t **addr, VS r) { - register V p0 __asm__ ("q4") = r.val[0]; - register V p1 __asm__ ("q5") = r.val[1]; - __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPRIT3(data_t **addr, VS r) { - register V p0 __asm__ ("q6") = r.val[0]; - register V p1 __asm__ ("q7") = r.val[1]; - __asm__ __volatile__ ("vst2.32 {%q1,%q2}, [%0, :128]!\n\t" - : "+r" (*addr) - : "w" (p0), "w" (p1) - : "memory"); - //STORESPRI(addr, p0, p1); -} -__INLINE void STORESPR0(data_t *addr, VS r) { - register V p0 __asm__ ("q0") = r.val[0]; - register V p1 __asm__ ("q1") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t" - : - : "r" (addr), "w" (p0), "w" (p1) - : "memory"); -} -__INLINE void STORESPR1(data_t *addr, VS r) { - register V p0 __asm__ ("q2") = r.val[0]; - register V p1 __asm__ ("q3") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t" - : - : "r" (addr), "w" (p0), "w" (p1) - : "memory"); -} -__INLINE void STORESPR2(data_t *addr, VS r) { - register V p0 __asm__ ("q4") = r.val[0]; - register V p1 __asm__ ("q5") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t" - : - : "r" (addr), 
"w" (p0), "w" (p1) - : "memory"); -} -__INLINE void STORESPR3(data_t *addr, VS r) { - register V p0 __asm__ ("q6") = r.val[0]; - register V p1 __asm__ ("q7") = r.val[1]; - __asm__ __volatile__ ("vst1.32 {%q1,%q2}, [%0, :128]\n\t" - : - : "r" (addr), "w" (p0), "w" (p1) - : "memory"); -} -__INLINE VS LOADSPR0(data_t *addr) { - VS r; - register V p0 __asm__ ("q8") ; - register V p1 __asm__ ("q9") ; - __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t" - : "=&w" (p0), "=&w" (p1) - : "r" (addr) - ); - r.val[0] = p0; r.val[1] = p1; - return r; -} -__INLINE VS LOADSPR1(data_t *addr) { - VS r; - register V p0 __asm__ ("q10") ; - register V p1 __asm__ ("q11") ; - __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t" - : "=&w" (p0), "=&w" (p1) - : "r" (addr) - ); - r.val[0] = p0; r.val[1] = p1; - return r; -} -__INLINE VS LOADSPR2(data_t *addr) { - VS r; - register V p0 __asm__ ("q12") ; - register V p1 __asm__ ("q13") ; - __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t" - : "=&w" (p0), "=&w" (p1) - : "r" (addr) - ); - r.val[0] = p0; r.val[1] = p1; - return r; -} -__INLINE VS LOADSPR3(data_t *addr) { - VS r; - register V p0 __asm__ ("q14") ; - register V p1 __asm__ ("q15") ; - __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]\n\t" - : "=&w" (p0), "=&w" (p1) - : "r" (addr) - ); - r.val[0] = p0; r.val[1] = p1; - return r; -} -__INLINE VS LOADSPRI(data_t * restrict * addr) { - VS r; - register V p0 __asm__ ("q2") ; - register V p1 __asm__ ("q3") ; - __asm__ __volatile__("vld1.32 {%q0,%q1}, [%2, :128]!\n\t" - : "=&w" (p0), "=&w" (p1), "+r" (*addr) - : - ); - r.val[0] = p0; r.val[1] = p1; - return r; -} - -__INLINE void X_4_SPLIT(data_t * restrict data, size_t N, data_t * restrict LUT) { - -//size_t i; -//for(i=0;i0;--k) { - VS r0, r1, r2, r3, r4, r5, r6, r7,w; - r0 = LOADSPR0(data0); - r2 = LOADSPR1(data2); - r1 = LOADSPR2(data1); - r3 = LOADSPR3(data3); - VSK_N(LOADSPRI(&LUT), &r0, &r1, &r2, &r3); - STORESPR2(data1, r1); - STORESPR3(data3, r3); - r4 = LOADSPR2(data4); - r6 = LOADSPR3(data6); - VSK_N(LOADSPRI(&LUT), &r0, &r2, &r4, &r6); - STORESPRI0(&data0, r0); //data0 += 8; - STORESPRI1(&data2, r2); //data2 += 8; - STORESPRI2(&data4, r4); //data4 += 8; - STORESPRI3(&data6, r6); //data6 += 8; - r1 = LOADSPR0(data1); - r3 = LOADSPR1(data3); - r5 = LOADSPR2(data5); - r7 = LOADSPR3(data7); - VSK_N(LOADSPRI(&LUT), &r1, &r3, &r5, &r7); - // LUT += 24; - STORESPRI0(&data1, r1); //data1 += 8; - STORESPRI1(&data3, r3); //data3 += 8; - STORESPRI2(&data5, r5); //data5 += 8; - STORESPRI3(&data7, r7); //data7 += 8; - } -} - -__INLINE void X_8_SPLIT_T(data_t * restrict data0, size_t N, data_t * restrict LUT) { - data_t *data2 = data0 + 2*N/4; - data_t *data4 = data0 + 4*N/4; - data_t *data6 = data0 + 6*N/4; - data_t *data1 = data0 + 1*N/4; - data_t *data3 = data0 + 3*N/4; - data_t *data5 = data0 + 5*N/4; - data_t *data7 = data0 + 7*N/4; - size_t k, n4 = N/4; - - for(k=N/8/2/2;k>0;--k) { - VS r0, r1, r2, r3, r4, r5, r6, r7,w; - r0 = LOADSPR0(data0); - r2 = LOADSPR1(data2); - r1 = LOADSPR2(data1); - r3 = LOADSPR3(data3); - VSK_N(LOADSPRI(&LUT), &r0, &r1, &r2, &r3); - STORESPR2(data1, r1); - STORESPR3(data3, r3); - r4 = LOADSPR2(data4); - r6 = LOADSPR3(data6); - VSK_N(LOADSPRI(&LUT), &r0, &r2, &r4, &r6); - STORESPRIT0(&data0, r0); //data0 += 8; - STORESPRIT1(&data2, r2); //data2 += 8; - STORESPRIT2(&data4, r4); //data4 += 8; - STORESPRIT3(&data6, r6); //data6 += 8; - r1 = LOADSPR0(data1); - r3 = LOADSPR1(data3); - r5 = LOADSPR2(data5); - r7 = LOADSPR3(data7); - VSK_N(LOADSPRI(&LUT), &r1, &r3, &r5, 
&r7); - STORESPRIT0(&data1, r1); //data1 += 8; - STORESPRIT1(&data3, r3); //data3 += 8; - STORESPRIT2(&data5, r5); //data5 += 8; - STORESPRIT3(&data7, r7); //data7 += 8; - } -} -__INLINE V LOAD2I(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]!\n\t" - : "=w" (o), "+r" (*addr) - : - ); - - return o; -} -__INLINE V LOAD2I_0(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag0\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_1(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag1\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_2(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag2\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_3(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag3\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_4(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag4\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_5(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag5\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_6(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag6\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOAD2I_7(const data_t **addr) { - float32x4_t o; - __asm__ ("vld2.32 {%q0}, [%1, :128]! @tag7\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} - - - -__INLINE V LOADI(const data_t **addr) { - float32x4_t o; - __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOADI_2(const data_t **addr) { - float32x4_t o; - __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t @tag2" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V LOADI_3(const data_t **addr) { - float32x4_t o; - __asm__ ("vld1.32 {%q0}, [%1, :128]!\n\t @tag3" : "=w" (o), "+r" (*addr) : ); - return o; -} -__INLINE V HSP_MUL(V *d, const V *w) { - V t; - t = vcombine_f32(vmul_f32(vget_low_f32(*d), vget_low_f32(*w)), - vmul_f32(vget_low_f32(*d), vget_high_f32(*w))); - t = vcombine_f32(vmls_f32(vget_low_f32(t), vget_high_f32(*d), vget_high_f32(*w)), - vmla_f32(vget_high_f32(t), vget_high_f32(*d), vget_low_f32(*w))); - return t; -} -__INLINE V HSP_MULJ(V *d, const V *w) { - V t; - t = vcombine_f32(vmul_f32(vget_low_f32(*d), vget_low_f32(*w)), - vmul_f32(vget_high_f32(*d), vget_low_f32(*w))); - t = vcombine_f32(vmla_f32(vget_low_f32(t), vget_high_f32(*d), vget_high_f32(*w)), - vmls_f32(vget_high_f32(t), vget_low_f32(*d), vget_high_f32(*w))); - return t; -} -__INLINE V HSP_SUB_MULI(V *a, V *b) { - return vcombine_f32(vadd_f32(vget_low_f32(*a), vget_high_f32(*b)), vsub_f32(vget_high_f32(*a), vget_low_f32(*b))); -} -__INLINE V HSP_ADD_MULI(V *a, V *b) { - return vcombine_f32(vsub_f32(vget_low_f32(*a), vget_high_f32(*b)), vadd_f32(vget_high_f32(*a), vget_low_f32(*b))); -} - -__INLINE void K_N_HSP(const V *w, V *r0, V *r1, V *r2, V *r3) { - V uk, uk2, zk_p, zk_n, zk, zk_d; - - uk = *r0; - uk2 = *r1; - zk_p = HSP_MUL(r2, w); - zk_n = HSP_MULJ(r3, w); - zk = ADD(zk_p, zk_n); - zk_d = SUB(zk_p, zk_n); - - *r2 = SUB(uk, zk); - *r0 = ADD(uk, zk); - *r3 = HSP_ADD_MULI(&uk2, &zk_d); - *r1 = HSP_SUB_MULI(&uk2, &zk_d); -} - -__INLINE void neon_shl8_ee(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict 
i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) { - - V r0, r1, r2, r3, r4, r5, r6, r7; - V t0, t1, t2, t3, t4, t5, t6, t7; - - - t0 = LOAD2I_0(i0); - t1 = LOAD2I_1(i1); - t2 = LOAD2I_2(i2); - t3 = LOAD2I_3(i3); - t4 = ADD (t0, t1); - t5 = SUB (t0, t1); - t6 = ADD (t2, t3); - t7 = SUB (t2, t3); - r0 = ADD (t4, t6); - r2 = SUB (t4, t6); - r1 = HSP_SUB_MULI(&t5, &t7); - r3 = HSP_ADD_MULI(&t5, &t7); - - t0 = LOAD2I_4(i4); - t1 = LOAD2I_5(i5); - t2 = LOAD2I_6(i6); - t3 = LOAD2I_7(i7); - r4 = ADD (t0, t1); - r5 = SUB (t0, t1); - r6 = ADD (t2, t3); - r7 = SUB (t2, t3); - - t0 = r0; t1 = r2; - t2 = ADD(r4, r6); - t3 = SUB(r4, r6); - r0 = ADD(t0, t2); - r4 = SUB(t0, t2); - r2 = HSP_SUB_MULI(&t1, &t3); - r6 = HSP_ADD_MULI(&t1, &t3); - - V w = vld1q_f32(ee_w_data); - - K_N_HSP(&w,&r1,&r3,&r5,&r7); - V uk, uk2, zk, zk_d; - - float32x4x2_t tmp1 = vtrnq_f32(r0, r2); - r0 = tmp1.val[0]; - r2 = tmp1.val[1]; - float32x4x2_t tmp4 = vtrnq_f32(r1, r3); - r1 = tmp4.val[0]; - r3 = tmp4.val[1]; - register V tt0 __asm__ ("q0") = r0; - register V tt1 __asm__ ("q1") = r1; - register V tt2 __asm__ ("q2") = r2; - register V tt3 __asm__ ("q3") = r3; - __asm__ __volatile__ ("vst2.32 {q0,q1}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt0), "w"(tt1) : "memory"); - __asm__ __volatile__ ("vst2.32 {q2,q3}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt2), "w"(tt3) : "memory"); - - float32x4x2_t tmp2 = vtrnq_f32(r4, r6); - r4 = tmp2.val[0]; - r6 = tmp2.val[1]; - float32x4x2_t tmp3 = vtrnq_f32(r5, r7); - r5 = tmp3.val[0]; - r7 = tmp3.val[1]; - register V tt4 __asm__ ("q4") = r4; - register V tt5 __asm__ ("q5") = r5; - register V tt6 __asm__ ("q6") = r6; - register V tt7 __asm__ ("q7") = r7; - - __asm__ __volatile__ ("vst2.32 {q4,q5}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt4), "w"(tt5) : "memory"); - __asm__ __volatile__ ("vst2.32 {q6,q7}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt6), "w"(tt7) : "memory"); - -} - -__INLINE void neon_shl8_oo(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) { - - V r0, r1, r2, r3, r4, r5, r6, r7; - V t0, t1, t2, t3, t4, t5, t6, t7; - - t0 = LOAD2I_0(i0); - t1 = LOAD2I_1(i1); - t2 = LOAD2I_2(i2); - t3 = LOAD2I_3(i3); - t4 = ADD (t0, t1); - t5 = SUB (t0, t1); - t6 = ADD (t2, t3); - t7 = SUB (t2, t3); - r0 = ADD (t4, t6); - r2 = SUB (t4, t6); - r1 = HSP_SUB_MULI(&t5, &t7); - r3 = HSP_ADD_MULI(&t5, &t7); - - float32x4x2_t tmp1 = vtrnq_f32(r0, r2); - r0 = tmp1.val[0]; - r2 = tmp1.val[1]; - float32x4x2_t tmp4 = vtrnq_f32(r1, r3); - r1 = tmp4.val[0]; - r3 = tmp4.val[1]; - register V tt0 __asm__ ("q0") = r0; - register V tt1 __asm__ ("q1") = r1; - register V tt2 __asm__ ("q2") = r2; - register V tt3 __asm__ ("q3") = r3; - __asm__ __volatile__ ("vst2.32 {q0,q1}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt0), "w"(tt1) : "memory"); - __asm__ __volatile__ ("vst2.32 {q2,q3}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt2), "w"(tt3) : "memory"); - - - - t0 = LOAD2I_4(i4); - t1 = LOAD2I_5(i5); - t2 = LOAD2I_6(i6); - t3 = LOAD2I_7(i7); - t4 = ADD (t0, t1); - t5 = SUB (t0, t1); - t6 = ADD (t2, t3); - t7 = SUB (t2, t3); - r4 = ADD (t4, t6); - r6 = SUB (t4, t6); - r5 = HSP_SUB_MULI(&t5, &t7); - r7 = HSP_ADD_MULI(&t5, &t7); - - float32x4x2_t tmp2 = vtrnq_f32(r4, r6); - r4 = tmp2.val[0]; - r6 = tmp2.val[1]; - float32x4x2_t tmp3 = vtrnq_f32(r5, r7); - r5 = tmp3.val[0]; 
- r7 = tmp3.val[1]; - - - register V tt4 __asm__ ("q4") = r4; - register V tt5 __asm__ ("q5") = r5; - register V tt6 __asm__ ("q6") = r6; - register V tt7 __asm__ ("q7") = r7; - - __asm__ __volatile__ ("vst2.32 {q4,q5}, [%0, :128]!\n\t" : "+&r" (out0): "w"(tt4), "w"(tt5) : "memory"); - __asm__ __volatile__ ("vst2.32 {q6,q7}, [%0, :128]!\n\t" : "+&r" (out1): "w"(tt6), "w"(tt7) : "memory"); - - - -} - -static const __attribute__ ((aligned(16))) data_t eo_w_data[4] = {1.0f,0.70710678118654757273731092936941f, 0.0f,-0.70710678118654746171500846685376}; - - -__INLINE void neon_shl8_eo(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) { - /* - register V r0_1 __asm__ ("q0"); - register V r2_3 __asm__ ("q1"); - register V r4_5 __asm__ ("q2"); - register V r6_7 __asm__ ("q3"); - */ - const V w = vld1q_f32(eo_w_data); - - V r0_1, r2_3, r4_5, r6_7; - - register V r8_9 __asm__ ("q4"); - register V r10_11 __asm__ ("q5"); - register V r12_13 __asm__ ("q6"); - register V r14_15 __asm__ ("q7"); - - { - V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = LOAD2I_0(i0); - t1 = LOAD2I_1(i1); - t2 = LOAD2I_2(i2); - t3 = LOAD2I_3(i3); - t4 = ADD(t0, t1); - t5 = SUB(t0, t1); - t6 = ADD(t2, t3); - t7 = SUB(t2, t3); - - t0 = ADD(t4, t6); - t2 = SUB(t4, t6); - t1 = HSP_SUB_MULI(&t5, &t7); - t3 = HSP_ADD_MULI(&t5, &t7); - - float32x4x2_t tmp1 = vtrnq_f32(t0, t1); - t0 = tmp1.val[0]; - t1 = tmp1.val[1]; - float32x4x2_t tmp2 = vtrnq_f32(t2, t3); - t2 = tmp2.val[0]; - t3 = tmp2.val[1]; - - r0_1 = t0; - r2_3 = t2; - r8_9 = t1; - r10_11 = t3; - __asm__ __volatile__ ("vswp d9,d10\n\t" - "vst1.32 {d8,d9,d10,d11}, [%0, :128]!\n\t" -// "vst1.32 {d8,d9}, [%0, :128]!\n\t" -// "vst1.32 {d10,d11}, [%0, :128]!\n\t" - : "+&r" (out1) - : "w" (r8_9), "w" (r10_11) - : "memory"); - - } - { - V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = LOAD2I_4(i4); - t1 = LOAD2I_5(i5); - t2 = LOAD2I_6(i6); - t3 = LOAD2I_7(i7); - //t2 = HALFBLEND(t6, t7); - //t3 = HALFBLEND(t7, t6); - t4 = ADD(t0, t1); - t5 = SUB(t0, t1); - t6 = ADD(t2, t3); - t7 = SUB(t2, t3); - float32x4x2_t tmp1 = vtrnq_f32(t4, t5); - r4_5 = tmp1.val[0]; - float32x4x2_t tmp2 = vtrnq_f32(t6, t7); - r6_7 = tmp2.val[0]; - //t5 = MULI(t5); - t0 = ADD(t6, t4); - t2 = SUB(t6, t4); - t1 = HSP_SUB_MULI(&t7, &t5); - t3 = HSP_ADD_MULI(&t7, &t5); - - float32x4x2_t tmp3 = vtrnq_f32(t0, t1); - r12_13 = tmp3.val[1]; - float32x4x2_t tmp4 = vtrnq_f32(t2, t3); - r14_15 = tmp4.val[1]; - - - __asm__ __volatile__ ("vswp d13, d14\n\t" - "vst1.32 {d12,d13,d14,d15}, [%0, :128]!\n\t" -// "vst1.32 {d12,d13}, [%0, :128]!\n\t" -// "vst1.32 {d14,d15}, [%0, :128]!\n\t" - : "+&r" (out1) - : "w" (r12_13), "w" (r14_15) - : "memory"); - - - } - - K_N_HSP(&w,&r0_1,&r2_3,&r4_5,&r6_7); - - register V t0 __asm__ ("q0") = r0_1; - register V t1 __asm__ ("q1") = r2_3; - register V t2 __asm__ ("q2") = r4_5; - register V t3 __asm__ ("q3") = r6_7; - - __asm__ __volatile__ ("vswp d1, d2\n\t" - "vswp d5, d6\n\t" - "vstmia %0!, {q0-q3}\n\t" -// "vst1.32 {d0,d1}, [%0, :128]!\n\t" -// "vst1.32 {d2,d3}, [%0, :128]!\n\t" -// "vst1.32 {d4,d5}, [%0, :128]!\n\t" -// "vst1.32 {d6,d7}, [%0, :128]\n\t" - : "+&r" (out0) - : "w" (t0), "w" (t1), "w" (t2), "w" (t3) - : "memory"); - -} -static const __attribute__ ((aligned(16))) data_t oe_w_data[4] = {1.0f,0.70710678118654757273731092936941f, 0.0f,-0.70710678118654746171500846685376}; - -__INLINE 
void neon_shl8_oe(data_t *restrict out0, data_t *restrict out1,const data_t **restrict i0,const data_t **restrict i1,const data_t **restrict i2,const data_t **restrict i3,const data_t **restrict i4,const data_t **restrict i5,const data_t **restrict i6,const data_t **restrict i7) { - register V r0_1 __asm__ ("q0"); - register V r2_3 __asm__ ("q1"); - register V r4_5 __asm__ ("q2"); - register V r6_7 __asm__ ("q3"); - - V r8_9, r10_11, r12_13, r14_15; - const V w = vld1q_f32(oe_w_data); - - { - V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = LOAD2I_0(i0); - t1 = LOAD2I_1(i1); - t6 = LOADI_2(i2); - t7 = LOADI_3(i3); - - float32x2x2_t tmp0 = vtrn_f32(vget_low_f32(t6), vget_high_f32(t7)); - float32x2x2_t tmp1 = vtrn_f32(vget_low_f32(t7), vget_high_f32(t6)); - t2 = vcombine_f32(tmp0.val[0], tmp0.val[1]); - t3 = vcombine_f32(tmp1.val[0], tmp1.val[1]); - - t4 = ADD(t0, t1); - t5 = SUB(t0, t1); - t6 = ADD(t2, t3); - t7 = SUB(t2, t3); - float32x4x2_t tmp2 = vtrnq_f32(t4, t5); - r12_13 = tmp2.val[1]; - float32x4x2_t tmp3 = vtrnq_f32(t6, t7); - r14_15 = tmp3.val[1]; - - t0 = ADD(t4, t6); - t2 = SUB(t4, t6); - t1 = HSP_SUB_MULI(&t5, &t7); - t3 = HSP_ADD_MULI(&t5, &t7); - float32x4x2_t tmp4 = vtrnq_f32(t0, t1); - r0_1 = tmp4.val[0]; - float32x4x2_t tmp5 = vtrnq_f32(t2, t3); - r2_3 = tmp5.val[0]; - __asm__ __volatile__ ("vswp d1, d2\n\t" - "vst1.32 {q0, q1}, [%0, :128]!\n\t" -// "vst1.32 {q1}, [%0, :128]!\n\t" - : "+&r" (out0) - : "w" (r0_1), "w" (r2_3) - : "memory"); - } - { - V t0, t1, t2, t3, t4, t5, t6, t7; - t0 = LOAD2I_4(i4); - t1 = LOAD2I_5(i5); - t2 = LOAD2I_6(i6); - t3 = LOAD2I_7(i7); - t4 = ADD(t0, t1); - t5 = SUB(t0, t1); - t6 = ADD(t2, t3); - t7 = SUB(t2, t3); - t0 = ADD(t4, t6); - t2 = SUB(t4, t6); - t1 = HSP_SUB_MULI(&t5, &t7); - t3 = HSP_ADD_MULI(&t5, &t7); - - float32x4x2_t tmp0 = vtrnq_f32(t0, t1); - r4_5 = tmp0.val[0]; - r8_9 = tmp0.val[1]; - float32x4x2_t tmp1 = vtrnq_f32(t2, t3); - r6_7 = tmp1.val[0]; - r10_11 = tmp1.val[1]; - - - __asm__ __volatile__ ("vswp d5, d6\n\t" - "vst1.32 {q2, q3}, [%0, :128]!\n\t" -// "vst1.32 {q3}, [%0, :128]!\n\t" - : "+&r" (out0) - : "w" (r4_5), "w" (r6_7) - : "memory"); - - } - - K_N_HSP(&w,&r8_9,&r10_11,&r12_13,&r14_15); - register V t0 __asm__ ("q4") = r8_9; - register V t1 __asm__ ("q5") = r10_11; - register V t2 __asm__ ("q6") = r12_13; - register V t3 __asm__ ("q7") = r14_15; - - __asm__ __volatile__ ("vswp d9, d10\n\t" - "vswp d13, d14\n\t" - "vstmia %0!, {q4-q7}\n\t" -// "vst1.32 {q4}, [%0, :128]!\n\t" -// "vst1.32 {q5}, [%0, :128]!\n\t" -// "vst1.32 {q6}, [%0, :128]!\n\t" -// "vst1.32 {q7}, [%0, :128]\n\t" - : "+&r" (out1) - : "w" (t0), "w" (t1), "w" (t2), "w" (t3) - : "memory"); - - -} -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: -- cgit v1.1 From 7a085a444f81364b8026c06c76a09d83c7295ab0 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 12 Mar 2015 13:37:33 +0200 Subject: Replace data_t with float --- src/macros-neon.h | 84 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 31 deletions(-) diff --git a/src/macros-neon.h b/src/macros-neon.h index c015f47..5663252 100644 --- a/src/macros-neon.h +++ b/src/macros-neon.h @@ -8,14 +8,14 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
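A note on the VLIT4 helper reformatted below: like Intel's _mm_set_ps, its arguments are given highest lane first, so the in-memory lane order is the reverse of the argument order. A minimal scalar sketch of that contract (the reference function here is illustrative, not part of FFTS):

#include <string.h>

/* Mirrors VLIT4(f3, f2, f1, f0): lane 0 receives the LAST argument. */
static void lit4_ref(float out[4], float f3, float f2, float f1, float f0)
{
    const float d[4] = { f0, f1, f2, f3 };
    memcpy(out, d, sizeof(d));
}

/* lit4_ref(v, 3.0f, 2.0f, 1.0f, 0.0f) leaves v == {0, 1, 2, 3}. */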
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -35,57 +35,79 @@ #include "neon.h" #include -typedef float32x4_t V; - +typedef float32x4_t V; typedef float32x4x2_t VS; -#define ADD vaddq_f32 -#define SUB vsubq_f32 -#define MUL vmulq_f32 +#define ADD vaddq_f32 +#define SUB vsubq_f32 +#define MUL vmulq_f32 #define VADD vaddq_f32 #define VSUB vsubq_f32 #define VMUL vmulq_f32 -#define VXOR(x,y) (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y)))) -#define VST vst1q_f32 -#define VLD vld1q_f32 + +#define VXOR(x,y) \ + (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y)))) + +#define VST vst1q_f32 +#define VLD vld1q_f32 #define VST2 vst2q_f32 #define VLD2 vld2q_f32 #define VSWAPPAIRS(x) (vrev64q_f32(x)) -#define VUNPACKHI(a,b) (vcombine_f32(vget_high_f32(a), vget_high_f32(b))) -#define VUNPACKLO(a,b) (vcombine_f32(vget_low_f32(a), vget_low_f32(b))) +#define VUNPACKHI(a,b) \ + (vcombine_f32(vget_high_f32(a), vget_high_f32(b))) -#define VBLEND(x,y) (vcombine_f32(vget_low_f32(x), vget_high_f32(y))) +#define VUNPACKLO(a,b) \ + (vcombine_f32(vget_low_f32(a), vget_low_f32(b))) -static FFTS_INLINE V VLIT4(data_t f3, data_t f2, data_t f1, data_t f0) { - data_t __attribute__ ((aligned(16))) d[4] = {f0, f1, f2, f3}; +#define VBLEND(x,y) \ + (vcombine_f32(vget_low_f32(x), vget_high_f32(y))) + +static FFTS_INLINE V +VLIT4(float f3, float f2, float f1, float f0) +{ + float FFTS_ALIGN(16) d[4] = {f0, f1, f2, f3}; return VLD(d); } -#define VDUPRE(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0)) -#define VDUPIM(r) vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1)) +#define VDUPRE(r) \ + vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0)) + +#define VDUPIM(r) \ + vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1)) #define FFTS_MALLOC(d,a) (valloc(d)) #define FFTS_FREE(d) (free(d)) -static FFTS_INLINE void STORESPR(data_t * addr, VS p) { - vst1q_f32(addr, p.val[0]); - vst1q_f32(addr + 4, p.val[1]); +static FFTS_INLINE void +STORESPR(float *addr, VS p) +{ + vst1q_f32(addr, p.val[0]); + vst1q_f32(addr + 4, p.val[1]); } -static FFTS_INLINE V IMULI(int inv, V a) { - if (inv) return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); - else return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); +static FFTS_INLINE V +IMULI(int inv, V a) +{ + if (inv) { + return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, 
-0.0f, 0.0f, -0.0f))); + } else { + return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); + } } -static FFTS_INLINE V IMUL(V d, V re, V im) { +static FFTS_INLINE V +IMUL(V d, V re, V im) +{ re = VMUL(re, d); im = VMUL(im, VSWAPPAIRS(d)); return VSUB(re, im); } -static FFTS_INLINE V IMULJ(V d, V re, V im) { +static FFTS_INLINE V +IMULJ(V d, V re, V im) +{ re = VMUL(re, d); im = VMUL(im, VSWAPPAIRS(d)); return VADD(re, im); -- cgit v1.1 From 835c5ab5b3d9f3104959dc6722b4bad600eae8fe Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 12 Mar 2015 18:03:00 +0200 Subject: Rename vector V as V4SF; vector of 4 single precision floats. Rename all vector V macros accordingly. Redefine ffts_constants as ffts_constants_32f and ffts_constants_64f. --- src/ffts.c | 66 +++--- src/ffts_small.c | 60 ++--- src/ffts_static.c | 635 ++++++++++++++++++++++++++++++----------------------- src/ffts_static.h | 63 +++--- src/macros-alpha.h | 167 ++++++++------ src/macros-neon.h | 153 ++++++------- src/macros-sse.h | 125 ++++++----- src/macros.h | 200 +++++++++-------- 8 files changed, 796 insertions(+), 673 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 94d6f1b..fd0b716 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -203,7 +203,7 @@ void ffts_free_1d(ffts_plan_t *p) static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { - V MULI_SIGN; + V4SF MULI_SIGN; int hardcoded; size_t lut_size; size_t n_luts; @@ -212,9 +212,9 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) size_t n; if (sign < 0) { - MULI_SIGN = VLIT4(-0.0f, 0.0f, -0.0f, 0.0f); + MULI_SIGN = V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f); } else { - MULI_SIGN = VLIT4(0.0f, -0.0f, 0.0f, -0.0f); + MULI_SIGN = V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f); } /* LUTS */ @@ -348,13 +348,13 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #else //w = FFTS_MALLOC(n/4 * 2 * sizeof(ffts_cpx_32f), 32); for (j = 0; j < n/4; j += 2) { - V re, im, temp0; - temp0 = VLD(fw0 + j*2); - re = VDUPRE(temp0); - im = VDUPIM(temp0); - im = VXOR(im, MULI_SIGN); - VST(fw + j*4 + 0, re); - VST(fw + j*4 + 4, im); + V4SF re, im, temp0; + temp0 = V4SF_LD(fw0 + j*2); + re = V4SF_DUPLICATE_RE(temp0); + im = V4SF_DUPLICATE_IM(temp0); + im = V4SF_XOR(im, MULI_SIGN); + V4SF_ST(fw + j*4 + 0, re); + V4SF_ST(fw + j*4 + 4, im); } w += n/4 * 2; @@ -371,7 +371,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) float *fw2 = (float*) w2; float *fw = (float *)w; - V temp0, temp1, temp2, re, im; + V4SF temp0, temp1, temp2, re, im; size_t j; for (j = 0; j < n/8; j++) { @@ -413,26 +413,26 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) #else //w = FFTS_MALLOC(n/8 * 3 * 2 * sizeof(ffts_cpx_32f), 32); for (j = 0; j < n/8; j += 2) { - temp0 = VLD(fw0 + j*2); - re = VDUPRE(temp0); - im = VDUPIM(temp0); - im = VXOR(im, MULI_SIGN); - VST(fw + j*2*6 , re); - VST(fw + j*2*6+4, im); - - temp1 = VLD(fw1 + j*2); - re = VDUPRE(temp1); - im = VDUPIM(temp1); - im = VXOR(im, MULI_SIGN); - VST(fw + j*2*6+8 , re); - VST(fw + j*2*6+12, im); - - temp2 = VLD(fw2 + j*2); - re = VDUPRE(temp2); - im = VDUPIM(temp2); - im = VXOR(im, MULI_SIGN); - VST(fw + j*2*6+16, re); - VST(fw + j*2*6+20, im); + temp0 = V4SF_LD(fw0 + j*2); + re = V4SF_DUPLICATE_RE(temp0); + im = V4SF_DUPLICATE_IM(temp0); + im = V4SF_XOR(im, MULI_SIGN); + V4SF_ST(fw + j*2*6 , re); + V4SF_ST(fw + j*2*6+4, im); + + temp1 = V4SF_LD(fw1 + j*2); + re = V4SF_DUPLICATE_RE(temp1); + im = 
V4SF_DUPLICATE_IM(temp1); + im = V4SF_XOR(im, MULI_SIGN); + V4SF_ST(fw + j*2*6+8 , re); + V4SF_ST(fw + j*2*6+12, im); + + temp2 = V4SF_LD(fw2 + j*2); + re = V4SF_DUPLICATE_RE(temp2); + im = V4SF_DUPLICATE_IM(temp2); + im = V4SF_XOR(im, MULI_SIGN); + V4SF_ST(fw + j*2*6+16, re); + V4SF_ST(fw + j*2*6+20, im); } w += n/8 * 3 * 2; @@ -514,9 +514,9 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) #ifdef DYNAMIC_DISABLED if (sign < 0) { - p->transform = ffts_static_transform_f; + p->transform = ffts_static_transform_f_32f; } else { - p->transform = ffts_static_transform_i; + p->transform = ffts_static_transform_i_32f; } #else /* determinate transform size */ diff --git a/src/ffts_small.c b/src/ffts_small.c index 34be7af..5bcbfc6 100644 --- a/src/ffts_small.c +++ b/src/ffts_small.c @@ -245,12 +245,12 @@ ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out) { const float *din = (const float*) in; float *dout = (float*) out; - V r0_1, r2_3, r4_5, r6_7; + V4SF r0_1, r2_3, r4_5, r6_7; float *LUT8 = (float*) p->ws + p->ws_is[0]; - L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); + V4SF_L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(0, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); } void @@ -258,7 +258,7 @@ ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - V r0_1, r2_3, r4_5, r6_7; + V4SF r0_1, r2_3, r4_5, r6_7; double *LUT8 = (double*) p->ws + p->ws_is[0]; #if MACROS_READY @@ -273,12 +273,12 @@ ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out) { const float *din = (const float*) in; float *dout = (float*) out; - V r0_1, r2_3, r4_5, r6_7; + V4SF r0_1, r2_3, r4_5, r6_7; float *LUT8 = (float*) p->ws + p->ws_is[0]; - L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); + V4SF_L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(1, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); } void @@ -286,7 +286,7 @@ ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - V r0_1, r2_3, r4_5, r6_7; + V4SF r0_1, r2_3, r4_5, r6_7; double *LUT8 = (double*) p->ws + p->ws_is[0]; #if MACROS_READY @@ -302,15 +302,15 @@ ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out) const float *din = (const float*) in; float *dout = (float*) out; float *LUT8 = (float*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - - L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + V4SF_L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, 
&r8_9, &r10_11); + V4SF_L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + V4SF_K_N(0, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(0, V4SF_LD(LUT8+8), V4SF_LD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + V4SF_K_N(0, V4SF_LD(LUT8+16), V4SF_LD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); } void @@ -319,7 +319,7 @@ ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out) const double *din = (const double*) in; double *dout = (double*) out; double *LUT8 = (double*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; #ifdef MACROS_READY L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); @@ -338,15 +338,15 @@ ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out) const float *din = (const float*) in; float *dout = (float*) out; float *LUT8 = (float*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - - L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + V4SF_L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + V4SF_L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + V4SF_K_N(1, V4SF_LD(LUT8+ 0), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(1, V4SF_LD(LUT8+ 8), V4SF_LD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + V4SF_K_N(1, V4SF_LD(LUT8+16), V4SF_LD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); } void @@ -355,7 +355,7 @@ ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out) const double *din = (const double*) in; double *dout = (double*) out; double *LUT8 = (double*) p->ws; - V r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; #ifdef MACROS_READY L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); diff --git a/src/ffts_static.c b/src/ffts_static.c index cdecf1b..7a0bf4a 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -1,393 +1,465 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
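The ffts_static.c rewrite that follows renames the radix-4 building blocks (K_0, L_2, L_4 and the leaf kernels) to their V4SF_* forms without changing the arithmetic. For reference, the scalar complex radix-4 butterfly that L_4 implements is sketched below, with s = +I or -I standing in for the inv flag passed to IMULI (names here are illustrative only):

#include <complex.h>

/* Scalar reference for the L_4 butterfly over four complex inputs. */
static void radix4_butterfly_ref(float complex x[4], float complex s)
{
    float complex t4 = x[0] + x[1];
    float complex t5 = x[0] - x[1];
    float complex t6 = x[2] + x[3];
    float complex t7 = s * (x[2] - x[3]);   /* IMULI: multiply by +/-i */

    x[0] = t4 + t6;
    x[2] = t4 - t6;
    x[1] = t5 - t7;
    x[3] = t5 + t7;
}

The vector kernels perform the same computation on two interleaved complex values per V4SF register.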
- * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ffts_static.h" + #include "ffts_internal.h" #include "macros.h" #include -static const FFTS_ALIGN(16) data_t ffts_constants[16] = { - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - -0.70710678118654746171500846685376, 0.70710678118654746171500846685376, - -0.70710678118654746171500846685376, 0.70710678118654746171500846685376, - 1.0, 1.0, - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - 0.0, 0.0, - -0.70710678118654746171500846685376, 0.70710678118654746171500846685376 +static const FFTS_ALIGN(16) float ffts_constants_32f[16] = { + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + -0.70710678118654746171500846685376f, + 0.70710678118654746171500846685376f, + -0.70710678118654746171500846685376f, + 0.70710678118654746171500846685376f, + 1.0f, + 1.0f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.0f, + 0.0f, + -0.70710678118654746171500846685376f, + 0.70710678118654746171500846685376f +}; + +static const FFTS_ALIGN(16) float ffts_constants_inv_32f[16] = { + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + -0.70710678118654746171500846685376f, + 0.70710678118654746171500846685376f, + -0.70710678118654746171500846685376f, + 1.0f, + 1.0f, + 0.70710678118654757273731092936941f, + 0.70710678118654757273731092936941f, + 0.0f, + 0.0f, + 0.70710678118654746171500846685376f, + -0.70710678118654746171500846685376f +}; + +static const FFTS_ALIGN(16) double ffts_constants_64f[16] = { + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + -0.70710678118654746171500846685376, + 0.70710678118654746171500846685376, + -0.70710678118654746171500846685376, + 0.70710678118654746171500846685376, + 1.0, + 1.0, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.0, + 0.0, + -0.70710678118654746171500846685376, + 0.70710678118654746171500846685376 }; -static const FFTS_ALIGN(16) data_t ffts_constants_inv[16] = { - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - 0.70710678118654746171500846685376, -0.70710678118654746171500846685376, - 0.70710678118654746171500846685376, -0.70710678118654746171500846685376, - 1.0, 1.0, - 0.70710678118654757273731092936941, 0.70710678118654757273731092936941, - 0.0, 0.0, - 0.70710678118654746171500846685376, -0.70710678118654746171500846685376 +static const FFTS_ALIGN(16) double ffts_constants_inv_64f[16] = { + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.70710678118654746171500846685376, + -0.70710678118654746171500846685376, + 0.70710678118654746171500846685376, + -0.70710678118654746171500846685376, + 1.0, + 1.0, + 0.70710678118654757273731092936941, + 0.70710678118654757273731092936941, + 0.0, + 0.0, + 0.70710678118654746171500846685376, + -0.70710678118654746171500846685376 }; -static FFTS_INLINE void K_0(int inv, V *r0, V *r1, V *r2, V *r3) +static FFTS_INLINE void +V4SF_K_0(int inv, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF 
*r3) { - V t0, t1, t2, t3; + V4SF t0, t1, t2, t3; t0 = *r0; t1 = *r1; - t2 = VADD(*r2, *r3); - t3 = IMULI(inv, VSUB(*r2, *r3)); + t2 = V4SF_ADD(*r2, *r3); + t3 = V4SF_IMULI(inv, V4SF_SUB(*r2, *r3)); - *r0 = VADD(t0, t2); - *r2 = VSUB(t0, t2); - *r1 = VSUB(t1, t3); - *r3 = VADD(t1, t3); + *r0 = V4SF_ADD(t0, t2); + *r2 = V4SF_SUB(t0, t2); + *r1 = V4SF_SUB(t1, t3); + *r3 = V4SF_ADD(t1, t3); } -static FFTS_INLINE void L_2(const data_t *FFTS_RESTRICT i0, - const data_t *FFTS_RESTRICT i1, - const data_t *FFTS_RESTRICT i2, - const data_t *FFTS_RESTRICT i3, - V *r0, - V *r1, - V *r2, - V *r3) +static FFTS_INLINE void +V4SF_L_2(const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V t0, t1, t2, t3; + V4SF t0, t1, t2, t3; - t0 = VLD(i0); - t1 = VLD(i1); - t2 = VLD(i2); - t3 = VLD(i3); + t0 = V4SF_LD(i0); + t1 = V4SF_LD(i1); + t2 = V4SF_LD(i2); + t3 = V4SF_LD(i3); - *r0 = VADD(t0, t1); - *r1 = VSUB(t0, t1); - *r2 = VADD(t2, t3); - *r3 = VSUB(t2, t3); + *r0 = V4SF_ADD(t0, t1); + *r1 = V4SF_SUB(t0, t1); + *r2 = V4SF_ADD(t2, t3); + *r3 = V4SF_SUB(t2, t3); } -static FFTS_INLINE void L_4(int inv, - const data_t *FFTS_RESTRICT i0, - const data_t *FFTS_RESTRICT i1, - const data_t *FFTS_RESTRICT i2, - const data_t *FFTS_RESTRICT i3, - V *r0, - V *r1, - V *r2, - V *r3) +static FFTS_INLINE void +V4SF_L_4(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V t0, t1, t2, t3, t4, t5, t6, t7; - - t0 = VLD(i0); - t1 = VLD(i1); - t2 = VLD(i2); - t3 = VLD(i3); - - t4 = VADD(t0, t1); - t5 = VSUB(t0, t1); - t6 = VADD(t2, t3); - t7 = IMULI(inv, VSUB(t2, t3)); - - *r0 = VADD(t4, t6); - *r2 = VSUB(t4, t6); - *r1 = VSUB(t5, t7); - *r3 = VADD(t5, t7); + V4SF t0, t1, t2, t3, t4, t5, t6, t7; + + t0 = V4SF_LD(i0); + t1 = V4SF_LD(i1); + t2 = V4SF_LD(i2); + t3 = V4SF_LD(i3); + + t4 = V4SF_ADD(t0, t1); + t5 = V4SF_SUB(t0, t1); + t6 = V4SF_ADD(t2, t3); + t7 = V4SF_IMULI(inv, V4SF_SUB(t2, t3)); + + *r0 = V4SF_ADD(t4, t6); + *r2 = V4SF_SUB(t4, t6); + *r1 = V4SF_SUB(t5, t7); + *r3 = V4SF_ADD(t5, t7); } -static FFTS_INLINE void LEAF_EE(data_t *const FFTS_RESTRICT out, - const ptrdiff_t *FFTS_RESTRICT os, - const data_t *FFTS_RESTRICT in, - const ptrdiff_t *FFTS_RESTRICT is, - int inv) +static FFTS_INLINE void +V4SF_LEAF_EE(float *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const float *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) { - const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + const float *FFTS_RESTRICT LUT = inv ? 
ffts_constants_inv_32f : ffts_constants_32f; - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - data_t *out0 = out + os[0]; - data_t *out1 = out + os[1]; + float *out0 = out + os[0]; + float *out1 = out + os[1]; - L_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); - L_2(in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); + V4SF_L_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + V4SF_L_2(in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); - K_0(inv, &r0, &r2, &r4, &r6); - K_N(inv, VLD(LUT + 0), VLD(LUT + 4), &r1, &r3, &r5, &r7); - TX2(&r0, &r1); - TX2(&r2, &r3); - TX2(&r4, &r5); - TX2(&r6, &r7); + V4SF_K_0(inv, &r0, &r2, &r4, &r6); + V4SF_K_N(inv, V4SF_LD(LUT + 0), V4SF_LD(LUT + 4), &r1, &r3, &r5, &r7); + V4SF_TX2(&r0, &r1); + V4SF_TX2(&r2, &r3); + V4SF_TX2(&r4, &r5); + V4SF_TX2(&r6, &r7); - S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); - S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + V4SF_S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + V4SF_S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); } -static FFTS_INLINE void LEAF_EE2(data_t *const FFTS_RESTRICT out, - const ptrdiff_t *FFTS_RESTRICT os, - const data_t *FFTS_RESTRICT in, - const ptrdiff_t *FFTS_RESTRICT is, - int inv) +static FFTS_INLINE void +V4SF_LEAF_EE2(float *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const float *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) { - const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f; - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - data_t *out0 = out + os[0]; - data_t *out1 = out + os[1]; + float *out0 = out + os[0]; + float *out1 = out + os[1]; - L_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r0, &r1, &r2, &r3); - L_2(in + is[0], in + is[1], in + is[3], in + is[2], &r4, &r5, &r6, &r7); + V4SF_L_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r0, &r1, &r2, &r3); + V4SF_L_2(in + is[0], in + is[1], in + is[3], in + is[2], &r4, &r5, &r6, &r7); - K_0(inv, &r0, &r2, &r4, &r6); - K_N(inv, VLD(LUT + 0), VLD(LUT + 4), &r1, &r3, &r5, &r7); - TX2(&r0, &r1); - TX2(&r2, &r3); - TX2(&r4, &r5); - TX2(&r6, &r7); + V4SF_K_0(inv, &r0, &r2, &r4, &r6); + V4SF_K_N(inv, V4SF_LD(LUT + 0), V4SF_LD(LUT + 4), &r1, &r3, &r5, &r7); + V4SF_TX2(&r0, &r1); + V4SF_TX2(&r2, &r3); + V4SF_TX2(&r4, &r5); + V4SF_TX2(&r6, &r7); - S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); - S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + V4SF_S_4(r0, r2, r4, r6, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + V4SF_S_4(r1, r3, r5, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); } -static FFTS_INLINE void LEAF_EO(data_t *const FFTS_RESTRICT out, - const ptrdiff_t *FFTS_RESTRICT os, - const data_t *FFTS_RESTRICT in, - const ptrdiff_t *FFTS_RESTRICT is, - int inv) +static FFTS_INLINE void +V4SF_LEAF_EO(float *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const float *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) { - const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + const float *FFTS_RESTRICT LUT = inv ? 
ffts_constants_inv_32f : ffts_constants_32f; - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - data_t *out0 = out + os[0]; - data_t *out1 = out + os[1]; + float *out0 = out + os[0]; + float *out1 = out + os[1]; - L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); - L_2_4(inv, in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); + V4SF_L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + V4SF_L_2_4(inv, in + is[4], in + is[5], in + is[6], in + is[7], &r4, &r5, &r6, &r7); - S_4(r2, r3, r7, r6, out1 + 0, out1 + 4, out1 + 8, out1 + 12); - K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r0, &r1, &r4, &r5); - S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + V4SF_S_4(r2, r3, r7, r6, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r0, &r1, &r4, &r5); + V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); } -static FFTS_INLINE void LEAF_OE(data_t *const FFTS_RESTRICT out, - const ptrdiff_t *FFTS_RESTRICT os, - const data_t *FFTS_RESTRICT in, - const ptrdiff_t *FFTS_RESTRICT is, - int inv) +static FFTS_INLINE void +V4SF_LEAF_OE(float *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const float *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) { - const data_t *FFTS_RESTRICT LUT = inv ? ffts_constants_inv : ffts_constants; + const float *FFTS_RESTRICT LUT = inv ? ffts_constants_inv_32f : ffts_constants_32f; - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - data_t *out0 = out + os[0]; - data_t *out1 = out + os[1]; + float *out0 = out + os[0]; + float *out1 = out + os[1]; - L_4_2(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); - L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); + V4SF_L_4_2(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + V4SF_L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); - S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); - K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r6, &r7, &r2, &r3); - S_4(r6, r7, r2, r3, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); + V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r6, &r7, &r2, &r3); + V4SF_S_4(r6, r7, r2, r3, out1 + 0, out1 + 4, out1 + 8, out1 + 12); } -static FFTS_INLINE void LEAF_OO(data_t *const FFTS_RESTRICT out, - const ptrdiff_t *FFTS_RESTRICT os, - const data_t *FFTS_RESTRICT in, - const ptrdiff_t *FFTS_RESTRICT is, - int inv) +static FFTS_INLINE void +V4SF_LEAF_OO(float *const FFTS_RESTRICT out, + const ptrdiff_t *FFTS_RESTRICT os, + const float *FFTS_RESTRICT in, + const ptrdiff_t *FFTS_RESTRICT is, + int inv) { - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - data_t *out0 = out + os[0]; - data_t *out1 = out + os[1]; + float *out0 = out + os[0]; + float *out1 = out + os[1]; - L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); - L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); + V4SF_L_4_4(inv, in + is[0], in + is[1], in + is[2], in + is[3], &r0, &r1, &r2, &r3); + V4SF_L_4_4(inv, in + is[6], in + is[7], in + is[4], in + is[5], &r4, &r5, &r6, &r7); - S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 12); - S_4(r2, r3, r6, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); + V4SF_S_4(r0, r1, r4, r5, out0 + 0, out0 + 4, out0 + 8, out0 + 
12); + V4SF_S_4(r2, r3, r6, r7, out1 + 0, out1 + 4, out1 + 8, out1 + 12); } -static FFTS_INLINE void X_4(int inv, - data_t *FFTS_RESTRICT data, - size_t N, - const data_t *FFTS_RESTRICT LUT) +static FFTS_INLINE void +V4SF_X_4(int inv, + float *FFTS_RESTRICT data, + size_t N, + const float *FFTS_RESTRICT LUT) { size_t i; for (i = 0; i < N/8; i++) { - V r0 = VLD(data); - V r1 = VLD(data + 2*N/4); - V r2 = VLD(data + 4*N/4); - V r3 = VLD(data + 6*N/4); + V4SF r0 = V4SF_LD(data); + V4SF r1 = V4SF_LD(data + 2*N/4); + V4SF r2 = V4SF_LD(data + 4*N/4); + V4SF r3 = V4SF_LD(data + 6*N/4); - K_N(inv, VLD(LUT), VLD(LUT + 4), &r0, &r1, &r2, &r3); + V4SF_K_N(inv, V4SF_LD(LUT), V4SF_LD(LUT + 4), &r0, &r1, &r2, &r3); - VST(data , r0); - VST(data + 2*N/4, r1); - VST(data + 4*N/4, r2); - VST(data + 6*N/4, r3); + V4SF_ST(data , r0); + V4SF_ST(data + 2*N/4, r1); + V4SF_ST(data + 4*N/4, r2); + V4SF_ST(data + 6*N/4, r3); LUT += 8; data += 4; } } -static FFTS_INLINE void X_8(int inv, - data_t *FFTS_RESTRICT data0, - size_t N, - const data_t *FFTS_RESTRICT LUT) +static FFTS_INLINE void +V4SF_X_8(int inv, + float *FFTS_RESTRICT data0, + size_t N, + const float *FFTS_RESTRICT LUT) { - data_t *data1 = data0 + 1*N/4; - data_t *data2 = data0 + 2*N/4; - data_t *data3 = data0 + 3*N/4; - data_t *data4 = data0 + 4*N/4; - data_t *data5 = data0 + 5*N/4; - data_t *data6 = data0 + 6*N/4; - data_t *data7 = data0 + 7*N/4; + float *data1 = data0 + 1*N/4; + float *data2 = data0 + 2*N/4; + float *data3 = data0 + 3*N/4; + float *data4 = data0 + 4*N/4; + float *data5 = data0 + 5*N/4; + float *data6 = data0 + 6*N/4; + float *data7 = data0 + 7*N/4; size_t i; for (i = 0; i < N/16; i++) { - V r0, r1, r2, r3, r4, r5, r6, r7; + V4SF r0, r1, r2, r3, r4, r5, r6, r7; - r0 = VLD(data0); - r1 = VLD(data1); - r2 = VLD(data2); - r3 = VLD(data3); + r0 = V4SF_LD(data0); + r1 = V4SF_LD(data1); + r2 = V4SF_LD(data2); + r3 = V4SF_LD(data3); - K_N(inv, VLD(LUT), VLD(LUT + 4), &r0, &r1, &r2, &r3); - r4 = VLD(data4); - r6 = VLD(data6); + V4SF_K_N(inv, V4SF_LD(LUT), V4SF_LD(LUT + 4), &r0, &r1, &r2, &r3); + r4 = V4SF_LD(data4); + r6 = V4SF_LD(data6); - K_N(inv, VLD(LUT + 8), VLD(LUT + 12), &r0, &r2, &r4, &r6); - r5 = VLD(data5); - r7 = VLD(data7); + V4SF_K_N(inv, V4SF_LD(LUT + 8), V4SF_LD(LUT + 12), &r0, &r2, &r4, &r6); + r5 = V4SF_LD(data5); + r7 = V4SF_LD(data7); - K_N(inv, VLD(LUT + 16), VLD(LUT + 20), &r1, &r3, &r5, &r7); + V4SF_K_N(inv, V4SF_LD(LUT + 16), V4SF_LD(LUT + 20), &r1, &r3, &r5, &r7); LUT += 24; - VST(data0, r0); + V4SF_ST(data0, r0); data0 += 4; - VST(data1, r1); + V4SF_ST(data1, r1); data1 += 4; - VST(data2, r2); + V4SF_ST(data2, r2); data2 += 4; - VST(data3, r3); + V4SF_ST(data3, r3); data3 += 4; - VST(data4, r4); + V4SF_ST(data4, r4); data4 += 4; - VST(data5, r5); + V4SF_ST(data5, r5); data5 += 4; - VST(data6, r6); + V4SF_ST(data6, r6); data6 += 4; - VST(data7, r7); + V4SF_ST(data7, r7); data7 += 4; } } -static FFTS_INLINE void ffts_static_firstpass_odd(float *const FFTS_RESTRICT out, - const float *FFTS_RESTRICT in, - const ffts_plan_t *FFTS_RESTRICT p, - int inv) +static FFTS_INLINE void +ffts_static_firstpass_odd_32f(float *const FFTS_RESTRICT out, + const float *FFTS_RESTRICT in, + const ffts_plan_t *FFTS_RESTRICT p, + int inv) { size_t i, i0 = p->i0, i1 = p->i1; const ptrdiff_t *is = (const ptrdiff_t*) p->is; const ptrdiff_t *os = (const ptrdiff_t*) p->offsets; for (i = i0; i > 0; --i) { - LEAF_EE(out, os, in, is, inv); + V4SF_LEAF_EE(out, os, in, is, inv); in += 4; os += 2; } for (i = i1; i > 0; --i) { - LEAF_OO(out, os, in, is, 
inv); + V4SF_LEAF_OO(out, os, in, is, inv); in += 4; os += 2; } - LEAF_OE(out, os, in, is, inv); + V4SF_LEAF_OE(out, os, in, is, inv); in += 4; os += 2; for (i = i1; i > 0; --i) { - LEAF_EE2(out, os, in, is, inv); + V4SF_LEAF_EE2(out, os, in, is, inv); in += 4; os += 2; } } -static FFTS_INLINE void ffts_static_firstpass_even(float *FFTS_RESTRICT out, - const float *FFTS_RESTRICT in, - const ffts_plan_t *FFTS_RESTRICT p, - int inv) +static FFTS_INLINE void +ffts_static_firstpass_even_32f(float *FFTS_RESTRICT out, + const float *FFTS_RESTRICT in, + const ffts_plan_t *FFTS_RESTRICT p, + int inv) { size_t i, i0 = p->i0, i1 = p->i1; const ptrdiff_t *is = (const ptrdiff_t*) p->is; const ptrdiff_t *os = (const ptrdiff_t*) p->offsets; for(i = i0; i > 0; --i) { - LEAF_EE(out, os, in, is, inv); + V4SF_LEAF_EE(out, os, in, is, inv); in += 4; os += 2; } - LEAF_EO(out, os, in, is, inv); + V4SF_LEAF_EO(out, os, in, is, inv); in += 4; os += 2; for (i = i1; i > 0; --i) { - LEAF_OO(out, os, in, is, inv); + V4SF_LEAF_OO(out, os, in, is, inv); in += 4; os += 2; } for (i = i1; i > 0; --i) { - LEAF_EE2(out, os, in, is, inv); + V4SF_LEAF_EE2(out, os, in, is, inv); in += 4; os += 2; } } -void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) +static void +ffts_static_rec_f_32f(ffts_plan_t *p, float *data, size_t N) { const float *ws = (float*) p->ws; @@ -396,40 +468,41 @@ void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) size_t N2 = N >> 2; size_t N3 = N >> 3; - ffts_static_rec_f(p, data , N2); - ffts_static_rec_f(p, data + N1 , N3); - ffts_static_rec_f(p, data + N1 + N2, N3); - ffts_static_rec_f(p, data + N , N2); - ffts_static_rec_f(p, data + N + N1 , N2); + ffts_static_rec_f_32f(p, data , N2); + ffts_static_rec_f_32f(p, data + N1 , N3); + ffts_static_rec_f_32f(p, data + N1 + N2, N3); + ffts_static_rec_f_32f(p, data + N , N2); + ffts_static_rec_f_32f(p, data + N + N1 , N2); - X_8(0, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); + V4SF_X_8(0, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); } else if (N == 128) { const float *ws1 = ws + (p->ws_is[1] << 1); - X_8(0, data + 0, 32, ws1); + V4SF_X_8(0, data + 0, 32, ws1); - X_4(0, data + 64, 16, ws); - X_4(0, data + 96, 16, ws); + V4SF_X_4(0, data + 64, 16, ws); + V4SF_X_4(0, data + 96, 16, ws); - X_8(0, data + 128, 32, ws1); - X_8(0, data + 192, 32, ws1); + V4SF_X_8(0, data + 128, 32, ws1); + V4SF_X_8(0, data + 192, 32, ws1); - X_8(0, data, N, ws + (p->ws_is[3] << 1)); + V4SF_X_8(0, data, N, ws + (p->ws_is[3] << 1)); } else if (N == 64) { - X_4(0, data + 0, 16, ws); - X_4(0, data + 64, 16, ws); - X_4(0, data + 96, 16, ws); + V4SF_X_4(0, data + 0, 16, ws); + V4SF_X_4(0, data + 64, 16, ws); + V4SF_X_4(0, data + 96, 16, ws); - X_8(0, data + 0, N, ws + (p->ws_is[2] << 1)); + V4SF_X_8(0, data + 0, N, ws + (p->ws_is[2] << 1)); } else if (N == 32) { - X_8(0, data, N, ws + (p->ws_is[1] << 1)); + V4SF_X_8(0, data, N, ws + (p->ws_is[1] << 1)); } else { assert(N == 16); - X_4(0, data, N, ws); + V4SF_X_4(0, data, N, ws); } } -void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) +static void +ffts_static_rec_i_32f(ffts_plan_t *p, float *data, size_t N) { float *ws = (float*) p->ws; @@ -438,57 +511,59 @@ void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) size_t N2 = N >> 2; size_t N3 = N >> 3; - ffts_static_rec_i(p, data , N2); - ffts_static_rec_i(p, data + N1 , N3); - ffts_static_rec_i(p, data + N1 + N2, N3); - ffts_static_rec_i(p, data + N , N2); - ffts_static_rec_i(p, data + N + N1 , N2); + ffts_static_rec_i_32f(p, data , N2); + 
ffts_static_rec_i_32f(p, data + N1 , N3); + ffts_static_rec_i_32f(p, data + N1 + N2, N3); + ffts_static_rec_i_32f(p, data + N , N2); + ffts_static_rec_i_32f(p, data + N + N1 , N2); - X_8(1, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); + V4SF_X_8(1, data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); } else if (N == 128) { const float *ws1 = ws + (p->ws_is[1] << 1); - X_8(1, data + 0, 32, ws1); + V4SF_X_8(1, data + 0, 32, ws1); - X_4(1, data + 64, 16, ws); - X_4(1, data + 96, 16, ws); + V4SF_X_4(1, data + 64, 16, ws); + V4SF_X_4(1, data + 96, 16, ws); - X_8(1, data + 128, 32, ws1); - X_8(1, data + 192, 32, ws1); + V4SF_X_8(1, data + 128, 32, ws1); + V4SF_X_8(1, data + 192, 32, ws1); - X_8(1, data, N, ws + (p->ws_is[3] << 1)); + V4SF_X_8(1, data, N, ws + (p->ws_is[3] << 1)); } else if (N == 64) { - X_4(1, data + 0, 16, ws); - X_4(1, data + 64, 16, ws); - X_4(1, data + 96, 16, ws); + V4SF_X_4(1, data + 0, 16, ws); + V4SF_X_4(1, data + 64, 16, ws); + V4SF_X_4(1, data + 96, 16, ws); - X_8(1, data + 0, N, ws + (p->ws_is[2] << 1)); + V4SF_X_8(1, data + 0, N, ws + (p->ws_is[2] << 1)); } else if (N == 32) { - X_8(1, data, N, ws + (p->ws_is[1] << 1)); + V4SF_X_8(1, data, N, ws + (p->ws_is[1] << 1)); } else { assert(N == 16); - X_4(1, data, N, ws); + V4SF_X_4(1, data, N, ws); } } -void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out) +void +ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) { if (ffts_ctzl(p->N) & 1) { - ffts_static_firstpass_odd(out, in, p, 0); + ffts_static_firstpass_odd_32f((float*) out, (const float*) in, p, 0); } else { - ffts_static_firstpass_even(out, in, p, 0); + ffts_static_firstpass_even_32f((float*) out, (const float*) in, p, 0); } - ffts_static_rec_f(p, out, p->N); + ffts_static_rec_f_32f(p, (float*) out, p->N); } -void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out) +void +ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) { if (ffts_ctzl(p->N) & 1) { - ffts_static_firstpass_odd(out, in, p, 1); + ffts_static_firstpass_odd_32f((float*) out, (const float*) in, p, 1); } else { - ffts_static_firstpass_even(out, in, p, 1); + ffts_static_firstpass_even_32f((float*) out, (const float*) in, p, 1); } - ffts_static_rec_i(p, out, p->N); + ffts_static_rec_i_32f(p, (float*) out, p->N); } \ No newline at end of file diff --git a/src/ffts_static.h b/src/ffts_static.h index e599d80..924c3e1 100644 --- a/src/ffts_static.h +++ b/src/ffts_static.h @@ -1,33 +1,33 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. 
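With the recursion helpers made static in the hunks above, the ffts_static.h header below shrinks to the two 32-bit entry points, which ffts_init_1d() wires into the plan's transform pointer (see the DYNAMIC_DISABLED hunk in ffts.c). A hedged usage sketch, assuming the public ffts_init_1d()/ffts_free() lifecycle:

#include "ffts.h"

/* Sketch only: relies on p->transform having been set to
 * ffts_static_transform_f_32f by ffts_init_1d(), as in the hunk above. */
static void run_forward_64(const float *in, float *out)
{
    ffts_plan_t *p = ffts_init_1d(64, -1);  /* sign < 0: forward */
    if (p) {
        p->transform(p, in, out);
        ffts_free(p);
    }
}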
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @@ -40,10 +40,7 @@ #include "ffts.h" -void ffts_static_rec_f(ffts_plan_t *p, float *data, size_t N) ; -void ffts_static_transform_f(ffts_plan_t *p, const void *in, void *out); - -void ffts_static_rec_i(ffts_plan_t *p, float *data, size_t N) ; -void ffts_static_transform_i(ffts_plan_t *p, const void *in, void *out); +void ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out); +void ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out); #endif /* FFTS_STATIC_H */ diff --git a/src/macros-alpha.h b/src/macros-alpha.h index f4efaf8..f7795d4 100644 --- a/src/macros-alpha.h +++ b/src/macros-alpha.h @@ -1,40 +1,52 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2013, Michael J. Cree - Copyright (c) 2012, 2013, Anthony M. Blake - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
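macros-alpha.h, rewritten below, is the portable scalar fallback: V4SF becomes a union of four floats and every vector operation is spelled out lane by lane. Its load helper goes through memcpy, which is the strict-aliasing-safe way to reinterpret raw float storage; a compiler at -O2 typically lowers it to a single 16-byte move. Reduced to a minimal sketch (the struct name is illustrative):

#include <string.h>

typedef struct { float f[4]; } v4sf_ref;  /* stand-in for the V4SF union */

static v4sf_ref v4sf_load_ref(const void *src)
{
    v4sf_ref v;
    memcpy(&v, src, sizeof(v));  /* no alignment or aliasing assumptions */
    return v;
}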
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2013, Michael J. Cree +Copyright (c) 2012, 2013, Anthony M. Blake + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef FFTS_MACROS_ALPHA_H #define FFTS_MACROS_ALPHA_H +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + +#include "ffts_attributes.h" + +#ifdef HAVE_STRING_H #include +#endif + +#ifdef HAVE_STDLIB_H +#include +#endif typedef union { struct { @@ -44,14 +56,15 @@ typedef union { float i2; } r; uint32_t u[4]; -} V; +} V4SF; -#define FFTS_MALLOC(d,a) malloc(d) -#define FFTS_FREE(d) free(d) +#define FFTS_MALLOC(d,a) (malloc(d)) +#define FFTS_FREE(d) (free(d)) -static FFTS_ALWAYS_INLINE V VLIT4(float f3, float f2, float f1, float f0) +static FFTS_ALWAYS_INLINE V4SF +V4SF_LIT4(float f3, float f2, float f1, float f0) { - V z; + V4SF z; z.r.r1 = f0; z.r.i1 = f1; @@ -61,9 +74,10 @@ static FFTS_ALWAYS_INLINE V VLIT4(float f3, float f2, float f1, float f0) return z; } -static FFTS_ALWAYS_INLINE V VADD(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_ADD(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r1 + y.r.r1; z.r.i1 = x.r.i1 + y.r.i1; @@ -73,9 +87,10 @@ static FFTS_ALWAYS_INLINE V VADD(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VSUB(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_SUB(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r1 - y.r.r1; z.r.i1 = x.r.i1 - y.r.i1; @@ -85,9 +100,10 @@ static FFTS_ALWAYS_INLINE V VSUB(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VMUL(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_MUL(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r1 * y.r.r1; z.r.i1 = x.r.i1 * y.r.i1; @@ -97,9 +113,10 @@ static FFTS_ALWAYS_INLINE V VMUL(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VXOR(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_XOR(V4SF x, V4SF y) { - V z; + V4SF z; z.u[0] = x.u[0] ^ y.u[0]; z.u[1] = x.u[1] ^ y.u[1]; @@ -109,9 +126,10 @@ static FFTS_ALWAYS_INLINE V VXOR(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VSWAPPAIRS(V x) +static FFTS_ALWAYS_INLINE V4SF +V4SF_SWAP_PAIRS(V4SF x) { - V z; + V4SF z; z.r.r1 = x.r.i1; z.r.i1 = x.r.r1; @@ -121,9 +139,10 @@ static FFTS_ALWAYS_INLINE V VSWAPPAIRS(V x) return z; } -static FFTS_ALWAYS_INLINE V VBLEND(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_BLEND(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r1; z.r.i1 = x.r.i1; @@ -133,9 +152,10 @@ static FFTS_ALWAYS_INLINE V VBLEND(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VUNPACKHI(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_UNPACK_HI(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r2; z.r.i1 = x.r.i2; @@ -145,9 +165,10 @@ static FFTS_ALWAYS_INLINE V VUNPACKHI(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VUNPACKLO(V x, V y) +static FFTS_ALWAYS_INLINE V4SF +V4SF_UNPACK_LO(V4SF x, V4SF y) { - V z; + V4SF z; z.r.r1 = x.r.r1; z.r.i1 = x.r.i1; @@ -157,9 +178,10 @@ static FFTS_ALWAYS_INLINE V VUNPACKLO(V x, V y) return z; } -static FFTS_ALWAYS_INLINE V VDUPRE(V x) +static FFTS_ALWAYS_INLINE V4SF +V4SF_DUPLICATE_RE(V4SF x) { - V z; + V4SF z; z.r.r1 = x.r.r1; z.r.i1 = x.r.r1; @@ -169,9 +191,10 @@ static FFTS_ALWAYS_INLINE V VDUPRE(V x) return z; } -static FFTS_ALWAYS_INLINE V VDUPIM(V x) +static FFTS_ALWAYS_INLINE V4SF +V4SF_DUPLICATE_IM(V4SF x) { - V z; + V4SF z; z.r.r1 = x.r.i1; z.r.i1 = x.r.i1; @@ -181,23 +204,26 @@ static FFTS_ALWAYS_INLINE V VDUPIM(V x) return z; } -static FFTS_ALWAYS_INLINE V IMUL(V d, V re, V im) +static FFTS_ALWAYS_INLINE V4SF +V4SF_IMUL(V4SF d, V4SF re, V4SF im) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VSUB(re, im); + re = V4SF_MUL(re, d); + im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d)); + return V4SF_SUB(re, im); } -static FFTS_ALWAYS_INLINE V IMULJ(V d, V re, V im) +static 
FFTS_ALWAYS_INLINE V4SF +V4SF_IMULJ(V4SF d, V4SF re, V4SF im) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VADD(re, im); + re = V4SF_MUL(re, d); + im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d)); + return V4SF_ADD(re, im); } -static FFTS_ALWAYS_INLINE V MULI(int inv, V x) +static FFTS_ALWAYS_INLINE V4SF +V4SF_MULI(int inv, V4SF x) { - V z; + V4SF z; if (inv) { z.r.r1 = -x.r.r1; @@ -214,21 +240,24 @@ static FFTS_ALWAYS_INLINE V MULI(int inv, V x) return z; } -static FFTS_ALWAYS_INLINE V IMULI(int inv, V x) +static FFTS_ALWAYS_INLINE V4SF +V4SF_IMULI(int inv, V4SF x) { - return VSWAPPAIRS(MULI(inv, x)); + return V4SF_SWAP_PAIRS(V4SF_MULI(inv, x)); } -static FFTS_ALWAYS_INLINE V VLD(const void *s) +static FFTS_ALWAYS_INLINE V4SF +V4SF_LD(const void *s) { - V z; + V4SF z; memcpy(&z, s, sizeof(z)); return z; } -static FFTS_ALWAYS_INLINE void VST(void *d, V s) +static FFTS_ALWAYS_INLINE void +V4SF_ST(void *d, V4SF s) { - V *r = (V*) d; + V4SF *r = (V4SF*) d; *r = s; } diff --git a/src/macros-neon.h b/src/macros-neon.h index 5663252..4ec92b3 100644 --- a/src/macros-neon.h +++ b/src/macros-neon.h @@ -1,116 +1,119 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, 2013, Anthony M. Blake - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, 2013, Anthony M. Blake + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
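The V4SF_IMULI rewrite just below multiplies two interleaved complex values by +i or -i with no floating-point arithmetic at all: an XOR against a signed-zero mask flips the required signs, and vrev64q_f32 swaps each {re, im} lane pair. Per complex element it computes the following (scalar reference, name illustrative):

/* i*(re + i*im) = -im + i*re;  -i*(re + i*im) = im - i*re */
static void imuli_ref(int inv, const float x[2], float out[2])
{
    if (inv) {            /* multiply by -i */
        out[0] =  x[1];
        out[1] = -x[0];
    } else {              /* multiply by +i */
        out[0] = -x[1];
        out[1] =  x[0];
    }
}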
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + #ifndef FFTS_MACROS_NEON_H #define FFTS_MACROS_NEON_H -#include "neon.h" #include -typedef float32x4_t V; -typedef float32x4x2_t VS; +#ifdef HAVE_STDLIB_H +#include +#endif + +#define FFTS_MALLOC(d,a) (valloc(d)) +#define FFTS_FREE(d) (free(d)) + +typedef float32x4_t V4SF; +typedef float32x4x2_t V4SF2; -#define ADD vaddq_f32 -#define SUB vsubq_f32 -#define MUL vmulq_f32 -#define VADD vaddq_f32 -#define VSUB vsubq_f32 -#define VMUL vmulq_f32 +#define V4SF_ADD vaddq_f32 +#define V4SF_SUB vsubq_f32 +#define V4SF_MUL vmulq_f32 -#define VXOR(x,y) \ +#define V4SF_XOR(x,y) \ (vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), vreinterpretq_u32_f32(y)))) -#define VST vst1q_f32 -#define VLD vld1q_f32 -#define VST2 vst2q_f32 -#define VLD2 vld2q_f32 +#define V4SF_ST vst1q_f32 +#define V4SF_LD vld1q_f32 -#define VSWAPPAIRS(x) (vrev64q_f32(x)) +#define V4SF_SWAP_PAIRS(x) \ + (vrev64q_f32(x)) -#define VUNPACKHI(a,b) \ +#define V4SF_UNPACK_HI(a,b) \ (vcombine_f32(vget_high_f32(a), vget_high_f32(b))) -#define VUNPACKLO(a,b) \ +#define V4SF_UNPACK_LO(a,b) \ (vcombine_f32(vget_low_f32(a), vget_low_f32(b))) -#define VBLEND(x,y) \ +#define V4SF_BLEND(x,y) \ (vcombine_f32(vget_low_f32(x), vget_high_f32(y))) -static FFTS_INLINE V -VLIT4(float f3, float f2, float f1, float f0) +static FFTS_ALWAYS_INLINE V4SF +V4SF_LIT4(float f3, float f2, float f1, float f0) { float FFTS_ALIGN(16) d[4] = {f0, f1, f2, f3}; - return VLD(d); + return V4SF_LD(d); } -#define VDUPRE(r) \ +#define V4SF_DUPLICATE_RE(r) \ vcombine_f32(vdup_lane_f32(vget_low_f32(r),0), vdup_lane_f32(vget_high_f32(r),0)) -#define VDUPIM(r) \ +#define V4SF_DUPLICATE_IM(r) \ vcombine_f32(vdup_lane_f32(vget_low_f32(r),1), vdup_lane_f32(vget_high_f32(r),1)) -#define FFTS_MALLOC(d,a) (valloc(d)) -#define FFTS_FREE(d) (free(d)) - -static FFTS_INLINE void -STORESPR(float *addr, VS p) -{ - vst1q_f32(addr, p.val[0]); - vst1q_f32(addr + 4, p.val[1]); -} - -static FFTS_INLINE V -IMULI(int inv, V a) +static FFTS_ALWAYS_INLINE V4SF +V4SF_IMULI(int inv, V a) { if (inv) { - return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f))); + return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f))); } else { - return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f))); + return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f))); } } -static FFTS_INLINE V -IMUL(V d, V re, V im) +static FFTS_ALWAYS_INLINE V4SF +V4SF_IMUL(V4SF d, V4SF re, V4SF im) +{ + re = V4SF_MUL(re, d); + im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d)); + return V4SF_SUB(re, im); +} + +static FFTS_ALWAYS_INLINE V4SF +V4SF_IMULJ(V d, V re, V im) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VSUB(re, im); + re = V4SF_MUL(re, d); + im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d)); + return V4SF_ADD(re, im); } -static FFTS_INLINE V -IMULJ(V 
d, V re, V im) +#define V4SF2_ST vst2q_f32 +#define V4SF2_LD vld2q_f32 + +static FFTS_ALWAYS_INLINE void +V4SF2_STORE_SPR(float *addr, V4SF2 p) { - re = VMUL(re, d); - im = VMUL(im, VSWAPPAIRS(d)); - return VADD(re, im); + vst1q_f32(addr, p.val[0]); + vst1q_f32(addr + 4, p.val[1]); } #endif /* FFTS_MACROS_NEON_H */ diff --git a/src/macros-sse.h b/src/macros-sse.h index cab822c..827aa67 100644 --- a/src/macros-sse.h +++ b/src/macros-sse.h @@ -1,87 +1,100 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. 
BLAKE BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 */

 #ifndef FFTS_MACROS_SSE_H
 #define FFTS_MACROS_SSE_H

+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
+
 #include <xmmintrin.h>

-//#define VL 4
+#define FFTS_MALLOC(d,a) (_mm_malloc(d,a))
+#define FFTS_FREE(d) (_mm_free(d))
+
+typedef __m128 V4SF;

-typedef __m128 V;
+#define V4SF_ADD _mm_add_ps
+#define V4SF_SUB _mm_sub_ps
+#define V4SF_MUL _mm_mul_ps
+#define V4SF_LIT4 _mm_set_ps
+#define V4SF_XOR _mm_xor_ps
+#define V4SF_ST _mm_store_ps
+#define V4SF_LD _mm_load_ps

-#define VADD _mm_add_ps
-#define VSUB _mm_sub_ps
-#define VMUL _mm_mul_ps
-#define VLIT4 _mm_set_ps
-#define VXOR _mm_xor_ps
-#define VST _mm_store_ps
-#define VLD _mm_load_ps
+#define V4SF_SWAP_PAIRS(x) \
+    (_mm_shuffle_ps(x, x, _MM_SHUFFLE(2,3,0,1)))

-#define VSWAPPAIRS(x) (_mm_shuffle_ps(x,x,_MM_SHUFFLE(2,3,0,1)))
+#define V4SF_UNPACK_HI(x,y) \
+    (_mm_shuffle_ps(x, y, _MM_SHUFFLE(3,2,3,2)))

-#define VUNPACKHI(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,3,2)))
-#define VUNPACKLO(x,y) (_mm_movelh_ps(x,y))
+#define V4SF_UNPACK_LO(x,y) \
+    (_mm_movelh_ps(x, y))

-#define VBLEND(x,y) (_mm_shuffle_ps(x,y,_MM_SHUFFLE(3,2,1,0)))
+#define V4SF_BLEND(x, y) \
+    (_mm_shuffle_ps(x, y, _MM_SHUFFLE(3,2,1,0)))

-#define VDUPRE(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(2,2,0,0)))
-#define VDUPIM(r) (_mm_shuffle_ps(r,r,_MM_SHUFFLE(3,3,1,1)))
+#define V4SF_DUPLICATE_RE(r) \
+    (_mm_shuffle_ps(r, r, _MM_SHUFFLE(2,2,0,0)))

-#define FFTS_MALLOC(d,a) (_mm_malloc(d,a))
-#define FFTS_FREE(d) (_mm_free(d))
+#define V4SF_DUPLICATE_IM(r) \
+    (_mm_shuffle_ps(r, r, _MM_SHUFFLE(3,3,1,1)))

-static FFTS_ALWAYS_INLINE V IMULI(int inv, V a)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULI(int inv, V4SF a)
 {
     if (inv) {
-        return VSWAPPAIRS(VXOR(a, VLIT4(0.0f, -0.0f, 0.0f, -0.0f)));
+        return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(0.0f, -0.0f, 0.0f, -0.0f)));
     } else {
-        return VSWAPPAIRS(VXOR(a, VLIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
+        return V4SF_SWAP_PAIRS(V4SF_XOR(a, V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f)));
     }
 }

-static FFTS_ALWAYS_INLINE V IMUL(V d, V re, V im)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMUL(V4SF d, V4SF re, V4SF im)
 {
-    re = VMUL(re, d);
-    im = VMUL(im, VSWAPPAIRS(d));
-    return VSUB(re, im);
+    re = V4SF_MUL(re, d);
+    im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+    return V4SF_SUB(re, im);
 }

-static FFTS_ALWAYS_INLINE V IMULJ(V d, V re, V im)
+static FFTS_ALWAYS_INLINE V4SF
+V4SF_IMULJ(V4SF d, V4SF re, V4SF im)
 {
-    re = VMUL(re, d);
-    im = VMUL(im, VSWAPPAIRS(d));
-    return VADD(re, im);
+    re = V4SF_MUL(re, d);
+    im = V4SF_MUL(im, V4SF_SWAP_PAIRS(d));
+    return V4SF_ADD(re, im);
 }

 #endif /* FFTS_MACROS_SSE_H */
diff --git a/src/macros.h b/src/macros.h
index fc53ae4..b755775 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -49,102 +49,108 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
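
The macros.h hunk below renames the generic radix kernels (TX2, K_N, L_2_4, L_4_4, L_4_2) to their V4SF_* forms. For reference, written out on a single interleaved complex element (a + ib) against broadcast twiddle lanes (wr, wi), the V4SF_IMUL / V4SF_IMULJ primitives those kernels build on reduce to the scalar arithmetic below; whether a given call amounts to multiplication by w or by conj(w) depends on the sign pattern stored with the twiddles. A standalone sketch, not part of the patch:

#include <stdio.h>

/* one lane pair of V4SF_IMUL:  re*d - im*swap_pairs(d) */
static void imul_pair(float a, float b, float wr, float wi, float out[2])
{
    out[0] = wr * a - wi * b;
    out[1] = wr * b - wi * a;
}

/* one lane pair of V4SF_IMULJ: re*d + im*swap_pairs(d) */
static void imulj_pair(float a, float b, float wr, float wi, float out[2])
{
    out[0] = wr * a + wi * b;
    out[1] = wr * b + wi * a;
}

int main(void)
{
    float r[2];

    imul_pair(0.5f, -1.25f, 0.8f, 0.6f, r);
    printf("IMUL : (%f, %f)\n", r[0], r[1]);

    imulj_pair(0.5f, -1.25f, 0.8f, 0.6f, r);
    printf("IMULJ: (%f, %f)\n", r[0], r[1]);
    return 0;
}
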
#endif static FFTS_INLINE void -TX2(V *a, V *b) +V4SF_TX2(V4SF *a, V4SF *b) { - V TX2_t0 = VUNPACKLO(*a, *b); - V TX2_t1 = VUNPACKHI(*a, *b); - *a = TX2_t0; - *b = TX2_t1; + V4SF t0 = V4SF_UNPACK_LO(*a, *b); + V4SF t1 = V4SF_UNPACK_HI(*a, *b); + *a = t0; + *b = t1; } static FFTS_INLINE void -K_N(int inv, V re, V im, V *r0, V *r1, V *r2, V *r3) +V4SF_K_N(int inv, + V4SF re, + V4SF im, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V uk, uk2, zk_p, zk_n, zk, zk_d; + V4SF uk, uk2, zk_p, zk_n, zk, zk_d; uk = *r0; uk2 = *r1; - zk_p = IMUL(*r2, re, im); - zk_n = IMULJ(*r3, re, im); + zk_p = V4SF_IMUL(*r2, re, im); + zk_n = V4SF_IMULJ(*r3, re, im); - zk = VADD(zk_p, zk_n); - zk_d = IMULI(inv, VSUB(zk_p, zk_n)); + zk = V4SF_ADD(zk_p, zk_n); + zk_d = V4SF_IMULI(inv, V4SF_SUB(zk_p, zk_n)); - *r2 = VSUB(uk, zk); - *r0 = VADD(uk, zk); - *r3 = VADD(uk2, zk_d); - *r1 = VSUB(uk2, zk_d); + *r2 = V4SF_SUB(uk, zk); + *r0 = V4SF_ADD(uk, zk); + *r3 = V4SF_ADD(uk2, zk_d); + *r1 = V4SF_SUB(uk2, zk_d); } static FFTS_INLINE void -L_2_4(int inv, - const float *FFTS_RESTRICT i0, - const float *FFTS_RESTRICT i1, - const float *FFTS_RESTRICT i2, - const float *FFTS_RESTRICT i3, - V *r0, - V *r1, - V *r2, - V *r3) +V4SF_L_2_4(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V t0, t1, t2, t3, t4, t5, t6, t7; + V4SF t0, t1, t2, t3, t4, t5, t6, t7; - t0 = VLD(i0); - t1 = VLD(i1); - t2 = VLD(i2); - t3 = VLD(i3); + t0 = V4SF_LD(i0); + t1 = V4SF_LD(i1); + t2 = V4SF_LD(i2); + t3 = V4SF_LD(i3); - t4 = VADD(t0, t1); - t5 = VSUB(t0, t1); - t6 = VADD(t2, t3); - t7 = VSUB(t2, t3); + t4 = V4SF_ADD(t0, t1); + t5 = V4SF_SUB(t0, t1); + t6 = V4SF_ADD(t2, t3); + t7 = V4SF_SUB(t2, t3); - *r0 = VUNPACKLO(t4, t5); - *r1 = VUNPACKLO(t6, t7); + *r0 = V4SF_UNPACK_LO(t4, t5); + *r1 = V4SF_UNPACK_LO(t6, t7); - t5 = IMULI(inv, t5); + t5 = V4SF_IMULI(inv, t5); - t0 = VADD(t6, t4); - t2 = VSUB(t6, t4); - t1 = VSUB(t7, t5); - t3 = VADD(t7, t5); + t0 = V4SF_ADD(t6, t4); + t2 = V4SF_SUB(t6, t4); + t1 = V4SF_SUB(t7, t5); + t3 = V4SF_ADD(t7, t5); - *r3 = VUNPACKHI(t0, t1); - *r2 = VUNPACKHI(t2, t3); + *r3 = V4SF_UNPACK_HI(t0, t1); + *r2 = V4SF_UNPACK_HI(t2, t3); } static FFTS_INLINE void -L_4_4(int inv, - const float *FFTS_RESTRICT i0, - const float *FFTS_RESTRICT i1, - const float *FFTS_RESTRICT i2, - const float *FFTS_RESTRICT i3, - V *r0, - V *r1, - V *r2, - V *r3) +V4SF_L_4_4(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V t0, t1, t2, t3, t4, t5, t6, t7; + V4SF t0, t1, t2, t3, t4, t5, t6, t7; - t0 = VLD(i0); - t1 = VLD(i1); - t2 = VLD(i2); - t3 = VLD(i3); + t0 = V4SF_LD(i0); + t1 = V4SF_LD(i1); + t2 = V4SF_LD(i2); + t3 = V4SF_LD(i3); - t4 = VADD(t0, t1); - t5 = VSUB(t0, t1); - t6 = VADD(t2, t3); + t4 = V4SF_ADD(t0, t1); + t5 = V4SF_SUB(t0, t1); + t6 = V4SF_ADD(t2, t3); - t7 = IMULI(inv, VSUB(t2, t3)); + t7 = V4SF_IMULI(inv, V4SF_SUB(t2, t3)); - t0 = VADD(t4, t6); - t2 = VSUB(t4, t6); - t1 = VSUB(t5, t7); - t3 = VADD(t5, t7); + t0 = V4SF_ADD(t4, t6); + t2 = V4SF_SUB(t4, t6); + t1 = V4SF_SUB(t5, t7); + t3 = V4SF_ADD(t5, t7); - TX2(&t0, &t1); - TX2(&t2, &t3); + V4SF_TX2(&t0, &t1); + V4SF_TX2(&t2, &t3); *r0 = t0; *r2 = t1; @@ -153,46 +159,46 @@ L_4_4(int inv, } static FFTS_INLINE void -L_4_2(int inv, - const float *FFTS_RESTRICT i0, - const float *FFTS_RESTRICT 
i1, - const float *FFTS_RESTRICT i2, - const float *FFTS_RESTRICT i3, - V *r0, - V *r1, - V *r2, - V *r3) +V4SF_L_4_2(int inv, + const float *FFTS_RESTRICT i0, + const float *FFTS_RESTRICT i1, + const float *FFTS_RESTRICT i2, + const float *FFTS_RESTRICT i3, + V4SF *r0, + V4SF *r1, + V4SF *r2, + V4SF *r3) { - V t0, t1, t2, t3, t4, t5, t6, t7; + V4SF t0, t1, t2, t3, t4, t5, t6, t7; - t0 = VLD(i0); - t1 = VLD(i1); - t6 = VLD(i2); - t7 = VLD(i3); + t0 = V4SF_LD(i0); + t1 = V4SF_LD(i1); + t6 = V4SF_LD(i2); + t7 = V4SF_LD(i3); - t2 = VBLEND(t6, t7); - t3 = VBLEND(t7, t6); + t2 = V4SF_BLEND(t6, t7); + t3 = V4SF_BLEND(t7, t6); - t4 = VADD(t0, t1); - t5 = VSUB(t0, t1); - t6 = VADD(t2, t3); - t7 = VSUB(t2, t3); + t4 = V4SF_ADD(t0, t1); + t5 = V4SF_SUB(t0, t1); + t6 = V4SF_ADD(t2, t3); + t7 = V4SF_SUB(t2, t3); - *r2 = VUNPACKHI(t4, t5); - *r3 = VUNPACKHI(t6, t7); + *r2 = V4SF_UNPACK_HI(t4, t5); + *r3 = V4SF_UNPACK_HI(t6, t7); - t7 = IMULI(inv, t7); + t7 = V4SF_IMULI(inv, t7); - t0 = VADD(t4, t6); - t2 = VSUB(t4, t6); - t1 = VSUB(t5, t7); - t3 = VADD(t5, t7); + t0 = V4SF_ADD(t4, t6); + t2 = V4SF_SUB(t4, t6); + t1 = V4SF_SUB(t5, t7); + t3 = V4SF_ADD(t5, t7); - *r0 = VUNPACKLO(t0, t1); - *r1 = VUNPACKLO(t2, t3); + *r0 = V4SF_UNPACK_LO(t0, t1); + *r1 = V4SF_UNPACK_LO(t2, t3); } -#define S_4(r0, r1, r2, r3, o0, o1, o2, o3) \ - VST(o0, r0); VST(o1, r1); VST(o2, r2); VST(o3, r3); +#define V4SF_S_4(r0, r1, r2, r3, o0, o1, o2, o3) \ + V4SF_ST(o0, r0); V4SF_ST(o1, r1); V4SF_ST(o2, r2); V4SF_ST(o3, r3); #endif /* FFTS_MACROS_H */ \ No newline at end of file -- cgit v1.1 From 13a81490439e7ce823fea11fcbd09cfa7286671a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 13 Mar 2015 11:13:10 +0200 Subject: Forgot to rename some V macros --- src/codegen_arm.h | 2 ++ src/ffts.c | 46 +++++++++++++++++++++++----------------------- src/ffts_real_nd.h | 2 +- src/macros-neon.h | 4 ++-- 4 files changed, 28 insertions(+), 26 deletions(-) diff --git a/src/codegen_arm.h b/src/codegen_arm.h index 2948ec3..7508f57 100644 --- a/src/codegen_arm.h +++ b/src/codegen_arm.h @@ -34,6 +34,8 @@ #ifndef FFTS_CODEGEN_ARM_H #define FFTS_CODEGEN_ARM_H +#include "neon.h" + uint32_t BL(void *pos, void *target) { return 0xeb000000 | (((target - pos) / 4) & 0xffffff); } diff --git a/src/ffts.c b/src/ffts.c index fd0b716..5774a56 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -285,7 +285,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) } #ifdef HAVE_NEON - V neg = (sign < 0) ? VLIT4(0.0f, 0.0f, 0.0f, 0.0f) : VLIT4(-0.0f, -0.0f, -0.0f, -0.0f); + V4SF neg = (sign < 0) ? 
V4SF_LIT4(0.0f, 0.0f, 0.0f, 0.0f) : V4SF_LIT4(-0.0f, -0.0f, -0.0f, -0.0f); #endif for (i = 0; i < n_luts; i++) { @@ -307,21 +307,21 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) if (N < 32) { // w = FFTS_MALLOC(n/4 * 2 * sizeof(ffts_cpx_32f), 32); float *fw = (float *)w; - V temp0, temp1, temp2; + V4SF temp0, temp1, temp2; for (j=0; j0, im); #else - im = MULI(sign>0, im); + im = V4SF_MULI(sign>0, im); #endif - VST(fw + j*4 , re); - VST(fw + j*4+4, im); + V4SF_ST(fw + j*4 , re); + V4SF_ST(fw + j*4+4, im); // #endif } w += n/4 * 2; @@ -330,11 +330,11 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) float *fw = (float *)w; #ifdef HAVE_NEON { - VS temp0, temp1, temp2; + V4SF2 temp0, temp1, temp2; for (j=0; j Date: Fri, 13 Mar 2015 11:30:20 +0200 Subject: One more macro fix --- src/ffts.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 5774a56..56325cc 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -310,7 +310,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) V4SF temp0, temp1, temp2; for (j=0; j Date: Fri, 13 Mar 2015 11:39:58 +0200 Subject: Add string.h to fix implicit declaration of function 'memcpy' --- src/codegen_arm.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/codegen_arm.h b/src/codegen_arm.h index 7508f57..3d146da 100644 --- a/src/codegen_arm.h +++ b/src/codegen_arm.h @@ -36,6 +36,10 @@ #include "neon.h" +#ifdef HAVE_STRING_H +#include +#endif + uint32_t BL(void *pos, void *target) { return 0xeb000000 | (((target - pos) / 4) & 0xffffff); } -- cgit v1.1 From 1ea951f98b7f35c42c49394a75ae3b8cf3e62dfe Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 16 Mar 2015 13:21:23 +0200 Subject: Merge ffts_small with ffts_static, and define small transforms "fully" constant --- CMakeLists.txt | 9 +- src/ffts.c | 6 +- src/ffts_small.c | 369 ---------------------------------- src/ffts_small.h | 85 -------- src/ffts_static.c | 584 +++++++++++++++++++++++++++++++++++++++++++++++++----- src/ffts_static.h | 49 ++++- 6 files changed, 585 insertions(+), 517 deletions(-) delete mode 100644 src/ffts_small.c delete mode 100644 src/ffts_small.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 12b3bf8..e96218b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -183,8 +183,8 @@ set(FFTS_SOURCES src/ffts_real.c src/ffts_real_nd.c src/ffts_real_nd.h - src/ffts_small.c - src/ffts_small.h + src/ffts_static.c + src/ffts_static.h src/macros.h src/patterns.c src/patterns.h @@ -268,11 +268,6 @@ elseif(HAVE_XMMINTRIN_H) endif(ENABLE_NEON) if(DISABLE_DYNAMIC_CODE) - list(APPEND FFTS_SOURCES - src/ffts_static.c - src/ffts_static.h - ) - add_definitions(-DDYNAMIC_DISABLED) else() list(APPEND FFTS_SOURCES diff --git a/src/ffts.c b/src/ffts.c index 56325cc..8f809db 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -34,13 +34,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ffts.h" #include "ffts_internal.h" +#include "ffts_static.h" #include "macros.h" #include "patterns.h" -#include "ffts_small.h" -#ifdef DYNAMIC_DISABLED -#include "ffts_static.h" -#else +#ifndef DYNAMIC_DISABLED #include "codegen.h" #endif diff --git a/src/ffts_small.c b/src/ffts_small.c deleted file mode 100644 index 5bcbfc6..0000000 --- a/src/ffts_small.c +++ /dev/null @@ -1,369 +0,0 @@ -/* - -This file is part of FFTS -- The Fastest Fourier Transform in the South - -Copyright (c) 2013, Michael J. Cree -Copyright (c) 2012, 2013, Anthony M. 
Blake - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of the organization nor the -names of its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - -#include "ffts_small.h" - -#include "ffts_internal.h" -#include "macros.h" - -void -ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - ffts_cpx_32f t0, t1, r0, r1; - - /* unreferenced parameter */ - (void) p; - - t0[0] = din[0]; - t0[1] = din[1]; - t1[0] = din[2]; - t1[1] = din[3]; - - r0[0] = t0[0] + t1[0]; - r0[1] = t0[1] + t1[1]; - r1[0] = t0[0] - t1[0]; - r1[1] = t0[1] - t1[1]; - - dout[0] = r0[0]; - dout[1] = r0[1]; - dout[2] = r1[0]; - dout[3] = r1[1]; -} - -void -ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - ffts_cpx_64f t0, t1, r0, r1; - - /* unreferenced parameter */ - (void) p; - - t0[0] = din[0]; - t0[1] = din[1]; - t1[0] = din[2]; - t1[1] = din[3]; - - r0[0] = t0[0] + t1[0]; - r0[1] = t0[1] + t1[1]; - r1[0] = t0[0] - t1[0]; - r1[1] = t0[1] - t1[1]; - - dout[0] = r0[0]; - dout[1] = r0[1]; - dout[2] = r1[0]; - dout[3] = r1[1]; -} - -void -ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; - - /* unreferenced parameter */ - (void) p; - - t0[0] = din[0]; - t0[1] = din[1]; - t1[0] = din[4]; - t1[1] = din[5]; - t2[0] = din[2]; - t2[1] = din[3]; - t3[0] = din[6]; - t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; - t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; - t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; - t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; - t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; - dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; - dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] + t7[1]; - dout[3] = t5[1] - t7[0]; - dout[6] = t5[0] - t7[1]; - dout[7] = t5[1] + t7[0]; -} - -void -ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; - - /* unreferenced parameter */ - (void) p; - - t0[0] 
= din[0]; - t0[1] = din[1]; - t1[0] = din[4]; - t1[1] = din[5]; - t2[0] = din[2]; - t2[1] = din[3]; - t3[0] = din[6]; - t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; - t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; - t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; - t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; - t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; - dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; - dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] + t7[1]; - dout[3] = t5[1] - t7[0]; - dout[6] = t5[0] - t7[1]; - dout[7] = t5[1] + t7[0]; -} - -void -ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; - - /* unreferenced parameter */ - (void) p; - - t0[0] = din[0]; - t0[1] = din[1]; - t1[0] = din[4]; - t1[1] = din[5]; - t2[0] = din[2]; - t2[1] = din[3]; - t3[0] = din[6]; - t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; - t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; - t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; - t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; - t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; - dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; - dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] - t7[1]; - dout[3] = t5[1] + t7[0]; - dout[6] = t5[0] + t7[1]; - dout[7] = t5[1] - t7[0]; -} - -void -ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; - - /* unreferenced parameter */ - (void) p; - - t0[0] = din[0]; - t0[1] = din[1]; - t1[0] = din[4]; - t1[1] = din[5]; - t2[0] = din[2]; - t2[1] = din[3]; - t3[0] = din[6]; - t3[1] = din[7]; - - t4[0] = t0[0] + t1[0]; - t4[1] = t0[1] + t1[1]; - t5[0] = t0[0] - t1[0]; - t5[1] = t0[1] - t1[1]; - t6[0] = t2[0] + t3[0]; - t6[1] = t2[1] + t3[1]; - t7[0] = t2[0] - t3[0]; - t7[1] = t2[1] - t3[1]; - - dout[0] = t4[0] + t6[0]; - dout[1] = t4[1] + t6[1]; - dout[4] = t4[0] - t6[0]; - dout[5] = t4[1] - t6[1]; - dout[2] = t5[0] - t7[1]; - dout[3] = t5[1] + t7[0]; - dout[6] = t5[0] + t7[1]; - dout[7] = t5[1] - t7[0]; -} - -void -ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - float *LUT8 = (float*) p->ws + p->ws_is[0]; - - V4SF_L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_K_N(0, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); -} - -void -ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - double *LUT8 = (double*) p->ws + p->ws_is[0]; - -#if MACROS_READY - L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); -#endif -} - -void -ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - float *LUT8 = (float*) p->ws + p->ws_is[0]; - - V4SF_L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_K_N(1, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); -} - -void 
-ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - double *LUT8 = (double*) p->ws + p->ws_is[0]; - -#if MACROS_READY - L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); -#endif -} - -void -ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - float *LUT8 = (float*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - - V4SF_L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - V4SF_L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - V4SF_K_N(0, V4SF_LD(LUT8), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_K_N(0, V4SF_LD(LUT8+8), V4SF_LD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); - V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - V4SF_K_N(0, V4SF_LD(LUT8+16), V4SF_LD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); -} - -void -ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - double *LUT8 = (double*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - -#ifdef MACROS_READY - L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); -#endif -} - -void -ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out) -{ - const float *din = (const float*) in; - float *dout = (float*) out; - float *LUT8 = (float*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - - V4SF_L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - V4SF_L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - V4SF_K_N(1, V4SF_LD(LUT8+ 0), V4SF_LD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - V4SF_K_N(1, V4SF_LD(LUT8+ 8), V4SF_LD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); - V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - V4SF_K_N(1, V4SF_LD(LUT8+16), V4SF_LD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); -} - -void -ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out) -{ - const double *din = (const double*) in; - double *dout = (double*) out; - double *LUT8 = (double*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; - -#ifdef MACROS_READY - L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); - L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); - K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); - K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); - S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); - K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); - S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); 
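
(The ffts_small_* kernels removed above reappear unchanged in ffts_static.c below.) They are fully unrolled DFTs; for instance, ffts_small_forward4_32f computes exactly what a naive 4-point transform computes, with the t5/t7 index-and-sign shuffle implementing the multiplication of t7 by -i. A throwaway reference one could diff its output against (illustrative sketch, not part of the tree):

#include <math.h>
#include <stdio.h>

#define TAU 6.283185307179586f /* 2*pi */

int main(void)
{
    /* four complex inputs, interleaved re/im, as the kernels expect */
    const float in[8] = { 1, 0, 2, 0, 3, 0, 4, 0 };
    int k, n;

    for (k = 0; k < 4; k++) { /* naive O(N^2) forward DFT, sign -1 */
        float sr = 0.0f, si = 0.0f;
        for (n = 0; n < 4; n++) {
            float c = cosf(-TAU * k * n / 4.0f);
            float s = sinf(-TAU * k * n / 4.0f);
            sr += in[2*n] * c - in[2*n+1] * s;
            si += in[2*n] * s + in[2*n+1] * c;
        }
        printf("X[%d] = (%g, %g)\n", k, sr, si);
    }
    return 0;
}
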
-#endif -} \ No newline at end of file diff --git a/src/ffts_small.h b/src/ffts_small.h deleted file mode 100644 index 249dcc9..0000000 --- a/src/ffts_small.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - -This file is part of FFTS -- The Fastest Fourier Transform in the South - -Copyright (c) 2013, Michael J. Cree -Copyright (c) 2012, 2013, Anthony M. Blake - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of the organization nor the -names of its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - -#ifndef FFTS_SMALL_H -#define FFTS_SMALL_H - -#if defined (_MSC_VER) && (_MSC_VER >= 1020) -#pragma once -#endif - -#include "ffts.h" - -void -ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out); - -void -ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out); - -#endif /* FFTS_SMALL_H */ diff --git a/src/ffts_static.c b/src/ffts_static.c index 7a0bf4a..701cca8 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -38,80 +38,220 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
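
The ffts_constants_small_* tables introduced in the hunk below hard-code the twiddle factors for the 8- and 16-point transforms, so these sizes no longer need a runtime-generated LUT. Every magnitude in them is cos or sin of pi/4 or pi/8 written out to full precision, which can be reproduced in a few lines (standalone sketch only):

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pi = 3.14159265358979323846;

    printf("cos(pi/4) = %.22f\n", cos(pi / 4)); /* 0.7071067811865475244008... */
    printf("cos(pi/8) = %.22f\n", cos(pi / 8)); /* 0.9238795325112867561281... */
    printf("sin(pi/8) = %.22f\n", sin(pi / 8)); /* 0.3826834323650897717284... */
    return 0;
}
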
#include -static const FFTS_ALIGN(16) float ffts_constants_32f[16] = { - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - -0.70710678118654746171500846685376f, - 0.70710678118654746171500846685376f, - -0.70710678118654746171500846685376f, - 0.70710678118654746171500846685376f, +static const FFTS_ALIGN(16) float ffts_constants_small_32f[24] = { 1.0f, 1.0f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + -0.0f, 0.0f, + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + 1.0f, + 1.0f, + 0.9238795325112867561281831893967882868224166258636425f, + 0.9238795325112867561281831893967882868224166258636425f, + + -0.0f, 0.0f, - -0.70710678118654746171500846685376f, - 0.70710678118654746171500846685376f + -0.3826834323650897717284599840303988667613445624856270f, + 0.3826834323650897717284599840303988667613445624856270f, + + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.3826834323650897717284599840303988667613445624856270f, + 0.3826834323650897717284599840303988667613445624856270f, + + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + -0.9238795325112867561281831893967882868224166258636425f, + 0.9238795325112867561281831893967882868224166258636425f }; -static const FFTS_ALIGN(16) float ffts_constants_inv_32f[16] = { - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, - 0.70710678118654746171500846685376f, - -0.70710678118654746171500846685376f, - 0.70710678118654746171500846685376f, - -0.70710678118654746171500846685376f, +static const FFTS_ALIGN(16) double ffts_constants_small_64f[24] = { + 1.0, + 1.0, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + + -0.0, + 0.0, + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + + 1.0, + 1.0, + 0.9238795325112867561281831893967882868224166258636425, + 0.9238795325112867561281831893967882868224166258636425, + + -0.0, + 0.0, + -0.3826834323650897717284599840303988667613445624856270, + 0.3826834323650897717284599840303988667613445624856270, + + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.3826834323650897717284599840303988667613445624856270, + 0.3826834323650897717284599840303988667613445624856270, + + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + -0.9238795325112867561281831893967882868224166258636425, + 0.9238795325112867561281831893967882868224166258636425 +}; + +static const FFTS_ALIGN(16) float ffts_constants_small_inv_32f[24] = { 1.0f, 1.0f, - 0.70710678118654757273731092936941f, - 0.70710678118654757273731092936941f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.0f, + -0.0f, + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f, + + 1.0f, + 1.0f, + 
0.9238795325112867561281831893967882868224166258636425f, + 0.9238795325112867561281831893967882868224166258636425f, + 0.0f, - 0.70710678118654746171500846685376f, - -0.70710678118654746171500846685376f + -0.0f, + 0.3826834323650897717284599840303988667613445624856270f, + -0.3826834323650897717284599840303988667613445624856270f, + + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.3826834323650897717284599840303988667613445624856270f, + 0.3826834323650897717284599840303988667613445624856270f, + + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f, + 0.9238795325112867561281831893967882868224166258636425f, + -0.9238795325112867561281831893967882868224166258636425f +}; + +static const FFTS_ALIGN(16) double ffts_constants_small_inv_64f[24] = { + 1.0, + 1.0, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + + 0.0, + -0.0, + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740, + + 1.0, + 1.0, + 0.9238795325112867561281831893967882868224166258636425, + 0.9238795325112867561281831893967882868224166258636425, + + 0.0, + -0.0, + 0.3826834323650897717284599840303988667613445624856270, + -0.3826834323650897717284599840303988667613445624856270, + + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.3826834323650897717284599840303988667613445624856270, + 0.3826834323650897717284599840303988667613445624856270, + + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740, + 0.9238795325112867561281831893967882868224166258636425, + -0.9238795325112867561281831893967882868224166258636425 +}; + +static const FFTS_ALIGN(16) float ffts_constants_32f[16] = { + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + 1.0f, + 1.0f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + 0.0f, + 0.0f, + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f }; static const FFTS_ALIGN(16) double ffts_constants_64f[16] = { - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - -0.70710678118654746171500846685376, - 0.70710678118654746171500846685376, - -0.70710678118654746171500846685376, - 0.70710678118654746171500846685376, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 1.0, 1.0, - 
0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.0, 0.0, - -0.70710678118654746171500846685376, - 0.70710678118654746171500846685376 + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740 +}; + +static const FFTS_ALIGN(16) float ffts_constants_inv_32f[16] = { + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f, + + 1.0f, + 1.0f, + 0.7071067811865475244008443621048490392848359376884740f, + 0.7071067811865475244008443621048490392848359376884740f, + + 0.0f, + 0.0f, + 0.7071067811865475244008443621048490392848359376884740f, + -0.7071067811865475244008443621048490392848359376884740f }; static const FFTS_ALIGN(16) double ffts_constants_inv_64f[16] = { - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, - 0.70710678118654746171500846685376, - -0.70710678118654746171500846685376, - 0.70710678118654746171500846685376, - -0.70710678118654746171500846685376, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740, + 1.0, 1.0, - 0.70710678118654757273731092936941, - 0.70710678118654757273731092936941, + 0.7071067811865475244008443621048490392848359376884740, + 0.7071067811865475244008443621048490392848359376884740, + 0.0, 0.0, - 0.70710678118654746171500846685376, - -0.70710678118654746171500846685376 + 0.7071067811865475244008443621048490392848359376884740, + -0.7071067811865475244008443621048490392848359376884740 }; static FFTS_INLINE void @@ -425,6 +565,350 @@ ffts_static_firstpass_odd_32f(float *const FFTS_RESTRICT out, } } +void +ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, r0, r1; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[2]; + t1[1] = din[3]; + + r0[0] = t0[0] + t1[0]; + r0[1] = t0[1] + t1[1]; + r1[0] = t0[0] - t1[0]; + r1[1] = t0[1] - t1[1]; + + dout[0] = r0[0]; + dout[1] = r0[1]; + dout[2] = r1[0]; + dout[3] = r1[1]; +} + +void +ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, r0, r1; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[2]; + t1[1] = din[3]; + + r0[0] = t0[0] + t1[0]; + r0[1] = t0[1] + t1[1]; + r1[0] = t0[0] - t1[0]; + r1[1] = t0[1] - t1[1]; + + dout[0] = r0[0]; + dout[1] = r0[1]; + dout[2] = r1[0]; + dout[3] = r1[1]; +} + +void 
+ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] + t7[1]; + dout[3] = t5[1] - t7[0]; + dout[6] = t5[0] - t7[1]; + dout[7] = t5[1] + t7[0]; +} + +void +ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] + t7[1]; + dout[3] = t5[1] - t7[0]; + dout[6] = t5[0] - t7[1]; + dout[7] = t5[1] + t7[0]; +} + +void +ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *din = (const float*) in; + float *dout = (float*) out; + ffts_cpx_32f t0, t1, t2, t3, t4, t5, t6, t7; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] - t7[1]; + dout[3] = t5[1] + t7[0]; + dout[6] = t5[0] + t7[1]; + dout[7] = t5[1] - t7[0]; +} + +void +ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + ffts_cpx_64f t0, t1, t2, t3, t4, t5, t6, t7; + + /* unreferenced parameter */ + (void) p; + + t0[0] = din[0]; + t0[1] = din[1]; + t1[0] = din[4]; + t1[1] = din[5]; + t2[0] = din[2]; + t2[1] = din[3]; + t3[0] = din[6]; + t3[1] = din[7]; + + t4[0] = t0[0] + t1[0]; + t4[1] = t0[1] + t1[1]; + t5[0] = t0[0] - t1[0]; + t5[1] = t0[1] - t1[1]; + t6[0] = t2[0] + t3[0]; + t6[1] = t2[1] + t3[1]; + t7[0] = t2[0] - t3[0]; + t7[1] = t2[1] - t3[1]; + + dout[0] = t4[0] + t6[0]; + dout[1] = t4[1] + t6[1]; + dout[4] = t4[0] - t6[0]; + dout[5] = t4[1] - t6[1]; + dout[2] = t5[0] - t7[1]; + dout[3] = t5[1] + t7[0]; + dout[6] = t5[0] + t7[1]; + dout[7] = t5[1] - t7[0]; +} + +void +ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *FFTS_RESTRICT lut = ffts_constants_small_32f; + const float *din = (const float*) in; + float *dout = (float*) out; + V4SF r0_1, r2_3, r4_5, r6_7; + + /* unreferenced parameter */ + (void) p; + + V4SF_L_4_2(0, din, 
din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(0, V4SF_LD(lut), V4SF_LD(lut + 4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +} + +void +ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + V4SF r0_1, r2_3, r4_5, r6_7; + double *LUT8 = (double*) p->ws + p->ws_is[0]; + +#if MACROS_READY + L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +#endif +} + +void +ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *FFTS_RESTRICT lut = ffts_constants_small_inv_32f; + const float *din = (const float*) in; + float *dout = (float*) out; + V4SF r0_1, r2_3, r4_5, r6_7; + + /* unreferenced parameter */ + (void) p; + + V4SF_L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(1, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +} + +void +ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + V4SF r0_1, r2_3, r4_5, r6_7; + double *LUT8 = (double*) p->ws + p->ws_is[0]; + +#if MACROS_READY + L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + S_4(r0_1, r2_3, r4_5, r6_7, dout+0, dout+4, dout+8, dout+12); +#endif +} + +void +ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *FFTS_RESTRICT lut = ffts_constants_small_32f; + const float *din = (const float*) in; + float *dout = (float*) out; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + /* unreferenced parameter */ + (void) p; + + V4SF_L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + V4SF_L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + V4SF_K_N(0, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(0, V4SF_LD(lut+8), V4SF_LD(lut+12), &r0_1, &r4_5, &r8_9, &r12_13); + V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + V4SF_K_N(0, V4SF_LD(lut+16), V4SF_LD(lut+20), &r2_3, &r6_7, &r10_11, &r14_15); + V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +} + +void +ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + double *LUT8 = (double*) p->ws; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + +#ifdef MACROS_READY + L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(0, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(0, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(0, VLD(LUT8+8), VLD(LUT8+12), &r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(0, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +#endif +} + +void +ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out) +{ + const float *FFTS_RESTRICT lut = ffts_constants_small_inv_32f; + const float *din = (const float*) in; + float *dout = (float*) out; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + + /* unreferenced parameter */ 
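+    /* (The plan is unused because the twiddles now come from the
+     * compile-time ffts_constants_small_inv_32f table above; the
+     * cast that follows only silences the unused-parameter warning.) */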
+ (void) p; + + V4SF_L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + V4SF_L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + V4SF_K_N(1, V4SF_LD(lut), V4SF_LD(lut+4), &r0_1, &r2_3, &r4_5, &r6_7); + V4SF_K_N(1, V4SF_LD(lut+8), V4SF_LD(lut+12), &r0_1, &r4_5, &r8_9, &r12_13); + V4SF_S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + V4SF_K_N(1, V4SF_LD(lut+16), V4SF_LD(lut+20), &r2_3, &r6_7, &r10_11, &r14_15); + V4SF_S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +} + +void +ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out) +{ + const double *din = (const double*) in; + double *dout = (double*) out; + double *LUT8 = (double*) p->ws; + V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + +#ifdef MACROS_READY + L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); + L_2_4(1, din+4, din+20, din+28, din+12, &r4_5, &r6_7, &r14_15, &r12_13); + K_N(1, VLD(LUT8), VLD(LUT8+4), &r0_1, &r2_3, &r4_5, &r6_7); + K_N(1, VLD(LUT8+8), VLD(LUT8+12),&r0_1, &r4_5, &r8_9, &r12_13); + S_4(r0_1, r4_5, r8_9, r12_13, dout+0, dout+8, dout+16, dout+24); + K_N(1, VLD(LUT8+16), VLD(LUT8+20), &r2_3, &r6_7, &r10_11, &r14_15); + S_4(r2_3, r6_7, r10_11, r14_15, dout+4, dout+12, dout+20, dout+28); +#endif +} + static FFTS_INLINE void ffts_static_firstpass_even_32f(float *FFTS_RESTRICT out, const float *FFTS_RESTRICT in, diff --git a/src/ffts_static.h b/src/ffts_static.h index 924c3e1..5a42fc2 100644 --- a/src/ffts_static.h +++ b/src/ffts_static.h @@ -40,7 +40,52 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ffts.h" -void ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out); -void ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out); +void +ffts_small_2_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_2_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward4_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward4_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward4_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward4_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward8_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward8_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward16_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward16_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out); + +void +ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out); #endif /* FFTS_STATIC_H */ -- cgit v1.1 From ee6ea4e982e6d4dd18cbe3703cfd24ae737e0ed1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 16 Mar 2015 13:33:41 +0200 Subject: Don't generate lookup tables when size is less than 32 --- src/ffts.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 8f809db..b0e4d27 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -483,12 +483,12 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) p->destroy = 
ffts_free_1d; p->N = N; - /* generate lookup tables */ - if (N > 4 && ffts_generate_luts(p, N, leaf_N, sign)) { - goto cleanup; - } - if (N >= 32) { + /* generate lookup tables */ + if (ffts_generate_luts(p, N, leaf_N, sign)) { + goto cleanup; + } + p->offsets = ffts_init_offsets(N, leaf_N); if (!p->offsets) { goto cleanup; -- cgit v1.1 From f869be2f8644d43d965a088850c28041b43a6eca Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 16 Mar 2015 16:22:47 +0200 Subject: Remove dead code --- src/ffts.c | 145 ++++++++++++++++++++----------------------------------------- 1 file changed, 47 insertions(+), 98 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index b0e4d27..8ffb0c3 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -56,22 +56,22 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(__arm__) && !defined(DYNAMIC_DISABLED) static const FFTS_ALIGN(64) float w_data[16] = { - 0.70710678118654757273731092936941f, - 0.70710678118654746171500846685376f, + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, -0.70710678118654757273731092936941f, -0.70710678118654746171500846685376f, - 1.0f, - 0.70710678118654757273731092936941f, + 1.0f, + 0.70710678118654757273731092936941f, -0.0f, -0.70710678118654746171500846685376f, - 0.70710678118654757273731092936941f, - 0.70710678118654746171500846685376f, - 0.70710678118654757273731092936941f, - 0.70710678118654746171500846685376f, - 1.0f, - 0.70710678118654757273731092936941f, - 0.0f, - 0.70710678118654746171500846685376f + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + 0.70710678118654757273731092936941f, + 0.70710678118654746171500846685376f, + 1.0f, + 0.70710678118654757273731092936941f, + 0.0f, + 0.70710678118654746171500846685376f }; #endif @@ -199,15 +199,14 @@ void ffts_free_1d(ffts_plan_t *p) free(p); } -static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) +static int +ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { V4SF MULI_SIGN; - int hardcoded; size_t lut_size; size_t n_luts; ffts_cpx_32f *w; - size_t i; - size_t n; + size_t i, n; if (sign < 0) { MULI_SIGN = V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f); @@ -217,13 +216,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) /* LUTS */ n_luts = ffts_ctzl(N / leaf_N); - if (N < 32) { - n_luts = ffts_ctzl(N / 4); - hardcoded = 1; - } else { - hardcoded = 0; - } - if (n_luts >= 32) { n_luts = 0; } @@ -231,20 +223,12 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) /* fprintf(stderr, "n_luts = %zu\n", n_luts); */ n = leaf_N * 2; - if (hardcoded) { - n = 8; - } lut_size = 0; - for (i = 0; i < n_luts; i++) { - if (!i || hardcoded) { + if (!i) { #if defined(__arm__) && !defined(DYNAMIC_DISABLED) - if (N <= 32) { - lut_size += n/4 * 2 * sizeof(ffts_cpx_32f); - } else { - lut_size += n/4 * sizeof(ffts_cpx_32f); - } + lut_size += n/4 * sizeof(ffts_cpx_32f); #else lut_size += n/4 * 2 * sizeof(ffts_cpx_32f); #endif @@ -259,8 +243,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) n *= 2; } - /* lut_size *= 16; */ - /* fprintf(stderr, "lut size = %zu\n", lut_size); */ if (n_luts) { @@ -276,11 +258,7 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) } w = p->ws; - n = leaf_N * 2; - if (hardcoded) { - n = 8; - } #ifdef HAVE_NEON V4SF neg = (sign < 0) ? 
V4SF_LIT4(0.0f, 0.0f, 0.0f, 0.0f) : V4SF_LIT4(-0.0f, -0.0f, -0.0f, -0.0f); @@ -290,61 +268,32 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) p->ws_is[i] = w - (ffts_cpx_32f*) p->ws; //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]); - if(!i || hardcoded) { + if (!i) { ffts_cpx_32f *w0 = FFTS_MALLOC(n/4 * sizeof(ffts_cpx_32f), 32); float *fw0 = (float*) w0; - float *fw = (float *)w; + float *fw = (float*) w; size_t j; for (j = 0; j < n/4; j++) { - w0[j][0] = W_re(n,j); - w0[j][1] = W_im(n,j); + w0[j][0] = W_re(n, j); + w0[j][1] = W_im(n, j); } #if defined(__arm__) && !defined(DYNAMIC_DISABLED) - if (N < 32) { - // w = FFTS_MALLOC(n/4 * 2 * sizeof(ffts_cpx_32f), 32); - float *fw = (float *)w; - V4SF temp0, temp1, temp2; - for (j=0; j0, im); -#else - im = V4SF_MULI(sign>0, im); -#endif - V4SF_ST(fw + j*4 , re); - V4SF_ST(fw + j*4+4, im); - // #endif - } - w += n/4 * 2; - } else { - //w = FFTS_MALLOC(n/4 * sizeof(ffts_cpx_32f), 32); - float *fw = (float *)w; #ifdef HAVE_NEON - { - V4SF2 temp0, temp1, temp2; - for (j=0; jws[i] = w; n *= 2; } @@ -465,7 +414,8 @@ cleanup: return -1; } -ffts_plan_t *ffts_init_1d(size_t N, int sign) +ffts_plan_t* +ffts_init_1d(size_t N, int sign) { const size_t leaf_N = 8; ffts_plan_t *p; @@ -484,10 +434,10 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) p->N = N; if (N >= 32) { - /* generate lookup tables */ - if (ffts_generate_luts(p, N, leaf_N, sign)) { - goto cleanup; - } + /* generate lookup tables */ + if (ffts_generate_luts(p, N, leaf_N, sign)) { + goto cleanup; + } p->offsets = ffts_init_offsets(N, leaf_N); if (!p->offsets) { @@ -518,7 +468,7 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) } #else /* determinate transform size */ -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(__arm__) if (N < 8192) { p->transform_size = 8192; } else { @@ -538,7 +488,6 @@ ffts_plan_t *ffts_init_1d(size_t N, int sign) goto cleanup; } - /* generate code */ p->transform = ffts_generate_func_code(p, N, leaf_N, sign); if (!p->transform) { -- cgit v1.1 From bc5aa8c1a2006a579b306234848d00a9ae34d362 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 16 Mar 2015 17:42:28 +0200 Subject: Determinate lookup table size using closed-form expression --- src/ffts.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 8ffb0c3..2b6b647 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -203,7 +203,6 @@ static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { V4SF MULI_SIGN; - size_t lut_size; size_t n_luts; ffts_cpx_32f *w; size_t i, n; @@ -220,32 +219,15 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) n_luts = 0; } - /* fprintf(stderr, "n_luts = %zu\n", n_luts); */ - - n = leaf_N * 2; + if (n_luts) { + size_t lut_size; - lut_size = 0; - for (i = 0; i < n_luts; i++) { - if (!i) { -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) - lut_size += n/4 * sizeof(ffts_cpx_32f); -#else - lut_size += n/4 * 2 * sizeof(ffts_cpx_32f); -#endif - n *= 2; - } else { #if defined(__arm__) && !defined(DYNAMIC_DISABLED) - lut_size += n/8 * 3 * sizeof(ffts_cpx_32f); + lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f) / 2; #else - lut_size += n/8 * 3 * 2 * sizeof(ffts_cpx_32f); + lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f); #endif - } - n *= 2; - } - - /* fprintf(stderr, "lut size = %zu\n", lut_size); */ - if (n_luts) { p->ws = FFTS_MALLOC(lut_size, 32); if (!p->ws) 
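
The closed form at the end of this hunk follows from the loop it replaces: the first level contributes n/4 * 2 = leaf_N complex entries (with n = leaf_N * 2), n is then doubled twice, and each later level i contributes n/8 * 3 * 2 = 3 * leaf_N * 2^i entries, so by the geometric series the total is leaf_N * (1 + 3 * (2^n_luts - 2)) entries, exactly the expression in the patch. A standalone check of the equivalence (non-ARM branch; n_luts >= 1, as guaranteed by the enclosing if (n_luts)):

    #include <assert.h>
    #include <stddef.h>

    /* the sizing loop this patch removes (non-ARM branch),
       with cpx standing in for sizeof(ffts_cpx_32f) */
    static size_t
    lut_size_loop(size_t leaf_N, size_t n_luts, size_t cpx)
    {
        size_t n = leaf_N * 2;
        size_t size = 0;
        size_t i;

        for (i = 0; i < n_luts; i++) {
            if (!i) {
                size += n / 4 * 2 * cpx;
                n *= 2; /* the first level doubles n an extra time */
            } else {
                size += n / 8 * 3 * 2 * cpx;
            }
            n *= 2;
        }

        return size;
    }

    int
    main(void)
    {
        size_t leaf_N = 8, cpx = 8, n_luts;

        for (n_luts = 1; n_luts < 20; n_luts++) {
            assert(lut_size_loop(leaf_N, n_luts, cpx) ==
                   leaf_N * ((((size_t) 1 << n_luts) - 2) * 3 + 1) * cpx);
        }

        return 0;
    }
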
{ goto cleanup; -- cgit v1.1 From 8dc312e88784ef67419f16bfb86defb7f6cc71c1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 17 Mar 2015 14:59:12 +0200 Subject: Remove dependency on YASM as Windows dynamic code is run-time generated --- CMakeLists.txt | 26 -- src/sse_win64.s | 828 -------------------------------------------------------- 2 files changed, 854 deletions(-) delete mode 100644 src/sse_win64.s diff --git a/CMakeLists.txt b/CMakeLists.txt index e96218b..1393689 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -226,32 +226,6 @@ elseif(HAVE_XMMINTRIN_H) ) if(MSVC) - if(NOT ENABLE_RUNTIME_DYNAMIC_CODE) - # YASM supports x86 GAS syntax - set(CMAKE_ASM-ATT_COMPILER yasm) - enable_language(ASM-ATT) - - if(CMAKE_ASM-ATT_COMPILER_WORKS) - add_custom_command( - OUTPUT sse_win64.obj - COMMAND ${CMAKE_ASM-ATT_COMPILER} -f win64 -m amd64 - -o ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj -p gas - ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/sse_win64.s - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Generating sse_win64.obj" - ) - - list(APPEND FFTS_SOURCES - ${CMAKE_CURRENT_BINARY_DIR}/sse_win64.obj - src/sse_win64.s - ) - else() - message(WARNING "YASM is required, enabling runtime dynamic code.") - set(ENABLE_RUNTIME_DYNAMIC_CODE ON) - endif(CMAKE_ASM-ATT_COMPILER_WORKS) - endif(NOT ENABLE_RUNTIME_DYNAMIC_CODE) - if(ENABLE_RUNTIME_DYNAMIC_CODE) add_definitions(-DSSE_DEFINE_CONSTANTS) endif(ENABLE_RUNTIME_DYNAMIC_CODE) diff --git a/src/sse_win64.s b/src/sse_win64.s deleted file mode 100644 index 193dedd..0000000 --- a/src/sse_win64.s +++ /dev/null @@ -1,828 +0,0 @@ -/* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
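
Deleting sse_win64.s works because everything it contained -- the leaf kernels, x4, x8_soft, the offset tables and the constants -- is now emitted at run time by codegen_sse.h (see the last commit in this series), so MSVC builds no longer need an external assembler. The run-time path amounts to writing the same bytes into an executable buffer and calling them; roughly (hypothetical helper, POSIX mmap shown for brevity -- on Windows the buffer would come from VirtualAlloc with PAGE_EXECUTE_READWRITE, and FFTS's real allocator differs):

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    typedef void (*transform_fn)(void *plan, const void *in, void *out);

    static transform_fn
    make_executable(const uint8_t *code, size_t len)
    {
        /* writable + executable mapping for the generated code */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            return NULL;
        }

        memcpy(buf, code, len);
        return (transform_fn) buf;
    }
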
- -*/ - - .code64 - -#ifdef __APPLE__ - .globl _leaf_ee_init -_leaf_ee_init: -#else - .globl leaf_ee_init -leaf_ee_init: -#endif - -# rax is loop counter (init to 0) -# rcx is a pointer to the ffts_plan -# rdx is 'in' base pointer -# rbx is loop max count -# rsi is constants pointer -# r9 is offsets pointer -# r8 is 'out' base pointer -# scratch: rax r10 r11 - - xorl %eax, %eax - movq (%rcx), %r9 - movq 0xe0(%rcx), %rsi - -# _leaf_ee + 8 needs 16 byte alignment -#ifdef __APPLE__ - .globl _leaf_ee -_leaf_ee: -#else - .globl leaf_ee -leaf_ee: -#endif - movaps 32(%rsi), %xmm0 #83.5 - movaps (%rsi), %xmm8 #83.5 -LEAF_EE_1: -LEAF_EE_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm7 #83.5 -LEAF_EE_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm12 #83.5 - movaps %xmm7, %xmm6 #83.5 -LEAF_EE_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm10 #83.5 - movaps %xmm12, %xmm11 #83.5 - subps %xmm10, %xmm12 #83.5 - addps %xmm10, %xmm11 #83.5 - xorps %xmm8, %xmm12 #83.5 -LEAF_EE_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm9 #83.5 -LEAF_EE_const_4: - movaps 0xFECA(%rdx,%rax,4), %xmm10 #83.5 - addps %xmm9, %xmm6 #83.5 - subps %xmm9, %xmm7 #83.5 -LEAF_EE_const_5: - movaps 0xFECA(%rdx,%rax,4), %xmm13 #83.5 - movaps %xmm10, %xmm9 #83.5 -LEAF_EE_const_6: - movaps 0xFECA(%rdx,%rax,4), %xmm3 #83.5 - movaps %xmm6, %xmm5 #83.5 -LEAF_EE_const_7: - movaps 0xFECA(%rdx,%rax,4), %xmm14 #83.5 - movaps %xmm3, %xmm15 #83.5 - shufps $177, %xmm12, %xmm12 #83.5 - movaps %xmm7, %xmm4 #83.5 - movslq (%r9, %rax, 4), %r10 #83.44 - subps %xmm13, %xmm10 #83.5 - subps %xmm14, %xmm3 #83.5 - addps %xmm11, %xmm5 #83.5 - subps %xmm11, %xmm6 #83.5 - subps %xmm12, %xmm4 #83.5 - addps %xmm12, %xmm7 #83.5 - addps %xmm13, %xmm9 #83.5 - addps %xmm14, %xmm15 #83.5 - movaps 16(%rsi), %xmm12 #83.5 - movaps %xmm9, %xmm1 #83.5 - movaps 16(%rsi), %xmm11 #83.5 - movaps %xmm5, %xmm2 #83.5 - mulps %xmm10, %xmm12 #83.5 - subps %xmm15, %xmm9 #83.5 - addps %xmm15, %xmm1 #83.5 - mulps %xmm3, %xmm11 #83.5 - addps %xmm1, %xmm2 #83.5 - subps %xmm1, %xmm5 #83.5 - shufps $177, %xmm10, %xmm10 #83.5 - xorps %xmm8, %xmm9 #83.5 - shufps $177, %xmm3, %xmm3 #83.5 - movaps %xmm6, %xmm1 #83.5 - mulps %xmm0, %xmm10 #83.5 - movaps %xmm4, %xmm13 #83.5 - mulps %xmm0, %xmm3 #83.5 - subps %xmm10, %xmm12 #83.5 - addps %xmm3, %xmm11 #83.5 - movaps %xmm12, %xmm3 #83.5 - movaps %xmm7, %xmm14 #83.5 - shufps $177, %xmm9, %xmm9 #83.5 - subps %xmm11, %xmm12 #83.5 - addps %xmm11, %xmm3 #83.5 - subps %xmm9, %xmm1 #83.5 - addps %xmm9, %xmm6 #83.5 - addps %xmm3, %xmm4 #83.5 - subps %xmm3, %xmm13 #83.5 - xorps %xmm8, %xmm12 #83.5 - movaps %xmm2, %xmm3 #83.5 - shufps $177, %xmm12, %xmm12 #83.5 - movaps %xmm6, %xmm9 #83.5 - movslq 8(%r9, %rax, 4), %r11 #83.59 - movlhps %xmm4, %xmm3 #83.5 - addq $4, %rax - shufps $238, %xmm4, %xmm2 #83.5 - movaps %xmm1, %xmm4 #83.5 - subps %xmm12, %xmm7 #83.5 - addps %xmm12, %xmm14 #83.5 - movlhps %xmm7, %xmm4 #83.5 - shufps $238, %xmm7, %xmm1 #83.5 - movaps %xmm5, %xmm7 #83.5 - movlhps %xmm13, %xmm7 #83.5 - movlhps %xmm14, %xmm9 #83.5 - shufps $238, %xmm13, %xmm5 #83.5 - shufps $238, %xmm14, %xmm6 #83.5 - movaps %xmm3, (%r8,%r10,4) #83.5 - movaps %xmm4, 16(%r8,%r10,4) #83.5 - movaps %xmm7, 32(%r8,%r10,4) #83.5 - movaps %xmm9, 48(%r8,%r10,4) #83.5 - movaps %xmm2, (%r8,%r11,4) #83.5 - movaps %xmm1, 16(%r8,%r11,4) #83.5 - movaps %xmm5, 32(%r8,%r11,4) #83.5 - movaps %xmm6, 48(%r8,%r11,4) #83.5 - cmpq %rbx, %rax - jne LEAF_EE_1 - -# _leaf_oo + 3 needs to be 16 byte aligned -#ifdef __APPLE__ - .globl _leaf_oo -_leaf_oo: -#else - .globl leaf_oo -leaf_oo: -#endif - movaps (%rsi), 
%xmm5 #92.7 -LEAF_OO_1: -LEAF_OO_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm4 #93.5 - movaps %xmm4, %xmm6 #93.5 -LEAF_OO_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm7 #93.5 -LEAF_OO_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm10 #93.5 - addps %xmm7, %xmm6 #93.5 - subps %xmm7, %xmm4 #93.5 -LEAF_OO_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm8 #93.5 - movaps %xmm10, %xmm9 #93.5 -LEAF_OO_const_4: - movaps 0xFECA(%rdx,%rax,4), %xmm1 #93.5 - movaps %xmm6, %xmm3 #93.5 -LEAF_OO_const_5: - movaps 0xFECA(%rdx,%rax,4), %xmm11 #93.5 - movaps %xmm1, %xmm2 #93.5 -LEAF_OO_const_6: - movaps 0xFECA(%rdx,%rax,4), %xmm14 #93.5 - movaps %xmm4, %xmm15 #93.5 -LEAF_OO_const_7: - movaps 0xFECA(%rdx,%rax,4), %xmm12 #93.5 - movaps %xmm14, %xmm13 #93.5 - movslq (%r9, %rax, 4), %r10 #83.44 - subps %xmm8, %xmm10 #93.5 - addps %xmm8, %xmm9 #93.5 - addps %xmm11, %xmm2 #93.5 - subps %xmm12, %xmm14 #93.5 - subps %xmm11, %xmm1 #93.5 - addps %xmm12, %xmm13 #93.5 - addps %xmm9, %xmm3 #93.5 - subps %xmm9, %xmm6 #93.5 - xorps %xmm5, %xmm10 #93.5 - xorps %xmm5, %xmm14 #93.5 - shufps $177, %xmm10, %xmm10 #93.5 - movaps %xmm2, %xmm9 #93.5 - shufps $177, %xmm14, %xmm14 #93.5 - movaps %xmm6, %xmm7 #93.5 - movslq 8(%r9, %rax, 4), %r11 #83.59 - addq $4, %rax #92.18 - addps %xmm10, %xmm4 #93.5 - addps %xmm13, %xmm9 #93.5 - subps %xmm13, %xmm2 #93.5 - subps %xmm10, %xmm15 #93.5 - movaps %xmm1, %xmm13 #93.5 - movaps %xmm2, %xmm8 #93.5 - movlhps %xmm4, %xmm7 #93.5 - subps %xmm14, %xmm13 #93.5 - addps %xmm14, %xmm1 #93.5 - shufps $238, %xmm4, %xmm6 #93.5 - movaps %xmm3, %xmm14 #93.5 - movaps %xmm9, %xmm4 #93.5 - movlhps %xmm15, %xmm14 #93.5 - movlhps %xmm13, %xmm4 #93.5 - movlhps %xmm1, %xmm8 #93.5 - shufps $238, %xmm15, %xmm3 #93.5 - shufps $238, %xmm13, %xmm9 #93.5 - shufps $238, %xmm1, %xmm2 #93.5 - movaps %xmm14, (%r8,%r10,4) #93.5 - movaps %xmm7, 16(%r8,%r10,4) #93.5 - movaps %xmm4, 32(%r8,%r10,4) #93.5 - movaps %xmm8, 48(%r8,%r10,4) #93.5 - movaps %xmm3, (%r8,%r11,4) #93.5 - movaps %xmm6, 16(%r8,%r11,4) #93.5 - movaps %xmm9, 32(%r8,%r11,4) #93.5 - movaps %xmm2, 48(%r8,%r11,4) #93.5 - cmpq %rbx, %rax - jne LEAF_OO_1 # Prob 95% #92.14 - -#ifdef __APPLE__ - .globl _leaf_eo -_leaf_eo: -#else - .globl leaf_eo -leaf_eo: -#endif -LEAF_EO_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm9 #88.5 -LEAF_EO_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm7 #88.5 - movaps %xmm9, %xmm11 #88.5 -LEAF_EO_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm5 #88.5 - movaps %xmm7, %xmm6 #88.5 -LEAF_EO_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm4 #88.5 - subps %xmm5, %xmm7 #88.5 - addps %xmm4, %xmm11 #88.5 - subps %xmm4, %xmm9 #88.5 - addps %xmm5, %xmm6 #88.5 - movaps (%rsi), %xmm3 #88.5 - movaps %xmm11, %xmm10 #88.5 - xorps %xmm3, %xmm7 #88.5 - movaps %xmm9, %xmm8 #88.5 - shufps $177, %xmm7, %xmm7 #88.5 - addps %xmm6, %xmm10 #88.5 - subps %xmm6, %xmm11 #88.5 - subps %xmm7, %xmm8 #88.5 - addps %xmm7, %xmm9 #88.5 - movslq 8(%r9, %rax, 4), %r11 #83.59 - movaps %xmm10, %xmm2 #88.5 - movslq (%r9, %rax, 4), %r10 #83.44 - movaps %xmm11, %xmm1 #88.5 - shufps $238, %xmm8, %xmm10 #88.5 - shufps $238, %xmm9, %xmm11 #88.5 - movaps %xmm10, (%r8,%r11,4) #88.5 - movaps %xmm11, 16(%r8,%r11,4) #88.5 -LEAF_EO_const_4: - movaps 0xFECA(%rdx,%rax,4), %xmm15 #88.5 -LEAF_EO_const_5: - movaps 0xFECA(%rdx,%rax,4), %xmm12 #88.5 - movaps %xmm15, %xmm14 #88.5 -LEAF_EO_const_6: - movaps 0xFECA(%rdx,%rax,4), %xmm4 #88.5 - addps %xmm12, %xmm14 #88.5 - subps %xmm12, %xmm15 #88.5 -LEAF_EO_const_7: - movaps 0xFECA(%rdx,%rax,4), %xmm13 #88.5 - movaps %xmm4, %xmm5 #88.5 - movaps %xmm14, %xmm7 #88.5 - addps 
%xmm13, %xmm5 #88.5 - subps %xmm13, %xmm4 #88.5 - movlhps %xmm8, %xmm2 #88.5 - movaps %xmm5, %xmm8 #88.5 - movlhps %xmm15, %xmm7 #88.5 - xorps %xmm3, %xmm15 #88.5 - movaps %xmm5, %xmm6 #88.5 - subps %xmm14, %xmm5 #88.5 - addps %xmm14, %xmm6 #88.5 - movlhps %xmm9, %xmm1 #88.5 - movaps %xmm4, %xmm14 #88.5 - movlhps %xmm4, %xmm8 #88.5 - movaps %xmm1, %xmm12 #88.5 - shufps $177, %xmm15, %xmm15 #88.5 - movaps 0x30(%rsi), %xmm11 #88.5 - addq $4, %rax #90.5 - subps %xmm15, %xmm14 #88.5 - mulps %xmm7, %xmm11 #88.5 - addps %xmm15, %xmm4 #88.5 - movaps 0x30(%rsi), %xmm9 #88.5 - movaps 0x40(%rsi), %xmm15 #88.5 - shufps $177, %xmm7, %xmm7 #88.5 - mulps %xmm8, %xmm9 #88.5 - mulps %xmm15, %xmm7 #88.5 - shufps $177, %xmm8, %xmm8 #88.5 - subps %xmm7, %xmm11 #88.5 - mulps %xmm15, %xmm8 #88.5 - movaps %xmm11, %xmm10 #88.5 - addps %xmm8, %xmm9 #88.5 - shufps $238, %xmm14, %xmm6 #88.5 - subps %xmm9, %xmm11 #88.5 - addps %xmm9, %xmm10 #88.5 - xorps %xmm3, %xmm11 #88.5 - movaps %xmm2, %xmm3 #88.5 - shufps $177, %xmm11, %xmm11 #88.5 - subps %xmm10, %xmm3 #88.5 - addps %xmm10, %xmm2 #88.5 - addps %xmm11, %xmm12 #88.5 - subps %xmm11, %xmm1 #88.5 - shufps $238, %xmm4, %xmm5 #88.5 - movaps %xmm5, 48(%r8,%r11,4) #88.5 - movaps %xmm6, 32(%r8,%r11,4) #88.5 - movaps %xmm2, (%r8,%r10,4) #88.5 - movaps %xmm1, 16(%r8,%r10,4) #88.5 - movaps %xmm3, 32(%r8,%r10,4) #88.5 - movaps %xmm12, 48(%r8,%r10,4) #88.5 - -#ifdef __APPLE__ - .globl _leaf_oe -_leaf_oe: -#else - .globl leaf_oe -leaf_oe: -#endif - movaps (%rsi), %xmm0 #59.5 -LEAF_OE_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm6 #70.5 -LEAF_OE_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm8 #70.5 - movaps %xmm6, %xmm10 #70.5 - shufps $228, %xmm8, %xmm10 #70.5 - movaps %xmm10, %xmm9 #70.5 - shufps $228, %xmm6, %xmm8 #70.5 -LEAF_OE_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm12 #70.5 -LEAF_OE_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm7 #70.5 - movaps %xmm12, %xmm14 #70.5 - movslq (%r9, %rax, 4), %r10 #83.44 - addps %xmm8, %xmm9 #70.5 - subps %xmm8, %xmm10 #70.5 - addps %xmm7, %xmm14 #70.5 - subps %xmm7, %xmm12 #70.5 - movaps %xmm9, %xmm4 #70.5 - movaps %xmm14, %xmm13 #70.5 - shufps $238, %xmm10, %xmm4 #70.5 - xorps %xmm0, %xmm10 #70.5 - shufps $177, %xmm10, %xmm10 #70.5 - movaps %xmm12, %xmm11 #70.5 - movaps %xmm14, %xmm5 #70.5 - addps %xmm9, %xmm13 #70.5 - subps %xmm10, %xmm11 #70.5 - subps %xmm9, %xmm14 #70.5 - shufps $238, %xmm12, %xmm5 #70.5 - addps %xmm10, %xmm12 #70.5 - movslq 8(%r9, %rax, 4), %r11 #83.59 - movlhps %xmm11, %xmm13 #70.5 - movaps %xmm13, (%r8,%r10,4) #70.5 - movaps 0x30(%rsi), %xmm13 #70.5 - movlhps %xmm12, %xmm14 #70.5 - movaps 0x40(%rsi), %xmm12 #70.5 - mulps %xmm5, %xmm13 #70.5 - shufps $177, %xmm5, %xmm5 #70.5 - mulps %xmm12, %xmm5 #70.5 - movaps %xmm14, 16(%r8,%r10,4) #70.5 - subps %xmm5, %xmm13 #70.5 - movaps 0x30(%rsi), %xmm5 #70.5 - mulps %xmm4, %xmm5 #70.5 - shufps $177, %xmm4, %xmm4 #70.5 - mulps %xmm12, %xmm4 #70.5 -LEAF_OE_const_4: - movaps 0xFECA(%rdx,%rax,4), %xmm9 #70.5 - addps %xmm4, %xmm5 #70.5 -LEAF_OE_const_6: - movaps 0xFECA(%rdx,%rax,4), %xmm7 #70.5 - movaps %xmm9, %xmm3 #70.5 -LEAF_OE_const_7: - movaps 0xFECA(%rdx,%rax,4), %xmm2 #70.5 - movaps %xmm7, %xmm6 #70.5 -LEAF_OE_const_5: - movaps 0xFECA(%rdx,%rax,4), %xmm15 #70.5 - movaps %xmm13, %xmm4 #70.5 - subps %xmm2, %xmm7 #70.5 - addps %xmm15, %xmm3 #70.5 - subps %xmm15, %xmm9 #70.5 - addps %xmm2, %xmm6 #70.5 - subps %xmm5, %xmm13 #70.5 - addps %xmm5, %xmm4 #70.5 - xorps %xmm0, %xmm7 #70.5 - addq $4, %rax #72.5 - movaps %xmm3, %xmm2 #70.5 - shufps $177, %xmm7, %xmm7 #70.5 - movaps %xmm9, 
%xmm8 #70.5 - xorps %xmm0, %xmm13 #70.5 - addps %xmm6, %xmm2 #70.5 - subps %xmm7, %xmm8 #70.5 - subps %xmm6, %xmm3 #70.5 - addps %xmm7, %xmm9 #70.5 - movaps %xmm2, %xmm10 #70.5 - movaps %xmm3, %xmm11 #70.5 - shufps $238, %xmm8, %xmm2 #70.5 - shufps $238, %xmm9, %xmm3 #70.5 - movaps %xmm2, %xmm14 #70.5 - shufps $177, %xmm13, %xmm13 #70.5 - subps %xmm4, %xmm14 #70.5 - addps %xmm4, %xmm2 #70.5 - movaps %xmm3, %xmm4 #70.5 - subps %xmm13, %xmm3 #70.5 - addps %xmm13, %xmm4 #70.5 - movlhps %xmm8, %xmm10 #70.5 - movlhps %xmm9, %xmm11 #70.5 - movaps %xmm10, 32(%r8,%r10,4) #70.5 - movaps %xmm11, 48(%r8,%r10,4) #70.5 - movaps %xmm2, (%r8,%r11,4) #70.5 - movaps %xmm3, 16(%r8,%r11,4) #70.5 - movaps %xmm14, 32(%r8,%r11,4) #70.5 - movaps %xmm4, 48(%r8,%r11,4) #70.5 - -#ifdef __APPLE__ - .globl _leaf_end -_leaf_end: -#else - .globl leaf_end -leaf_end: -#endif - -#ifdef __APPLE__ - .globl _x_init -_x_init: -#else - .globl x_init -x_init: -#endif - movaps (%rsi), %xmm3 #34.3 - movq 0x20(%rcx), %r9 -#ifdef __APPLE__ - .globl _x4 -_x4: -#else - .globl x4 -x4: -#endif - movaps 64(%r8), %xmm0 #34.3 - movaps 96(%r8), %xmm1 #34.3 - movaps (%r8), %xmm7 #34.3 - movaps (%r9), %xmm4 #const - movaps %xmm7, %xmm9 #34.3 - movaps %xmm4, %xmm6 #34.3 - movaps 16(%r9), %xmm2 #const - mulps %xmm0, %xmm6 #34.3 - mulps %xmm1, %xmm4 #34.3 - shufps $177, %xmm0, %xmm0 #34.3 - shufps $177, %xmm1, %xmm1 #34.3 - mulps %xmm2, %xmm0 #34.3 - mulps %xmm1, %xmm2 #34.3 - subps %xmm0, %xmm6 #34.3 - addps %xmm2, %xmm4 #34.3 - movaps %xmm6, %xmm5 #34.3 - subps %xmm4, %xmm6 #34.3 - addps %xmm4, %xmm5 #34.3 - movaps 32(%r8), %xmm8 #34.3 - xorps %xmm3, %xmm6 #34.3 - shufps $177, %xmm6, %xmm6 #34.3 - movaps %xmm8, %xmm10 #34.3 - movaps 112(%r8), %xmm12 #34.3 - subps %xmm5, %xmm9 #34.3 - addps %xmm5, %xmm7 #34.3 - addps %xmm6, %xmm10 #34.3 - subps %xmm6, %xmm8 #34.3 - movaps %xmm7, (%r8) #34.3 - movaps %xmm8, 32(%r8) #34.3 - movaps %xmm9, 64(%r8) #34.3 - movaps %xmm10, 96(%r8) #34.3 - movaps 32(%r9), %xmm14 #const #34.3 - movaps 80(%r8), %xmm11 #34.3 - movaps %xmm14, %xmm0 #34.3 - movaps 48(%r9), %xmm13 #const #34.3 - mulps %xmm11, %xmm0 #34.3 - mulps %xmm12, %xmm14 #34.3 - shufps $177, %xmm11, %xmm11 #34.3 - shufps $177, %xmm12, %xmm12 #34.3 - mulps %xmm13, %xmm11 #34.3 - mulps %xmm12, %xmm13 #34.3 - subps %xmm11, %xmm0 #34.3 - addps %xmm13, %xmm14 #34.3 - movaps %xmm0, %xmm15 #34.3 - subps %xmm14, %xmm0 #34.3 - addps %xmm14, %xmm15 #34.3 - xorps %xmm3, %xmm0 #34.3 - movaps 16(%r8), %xmm1 #34.3 - movaps 48(%r8), %xmm2 #34.3 - movaps %xmm1, %xmm4 #34.3 - shufps $177, %xmm0, %xmm0 #34.3 - movaps %xmm2, %xmm5 #34.3 - addps %xmm15, %xmm1 #34.3 - subps %xmm0, %xmm2 #34.3 - subps %xmm15, %xmm4 #34.3 - addps %xmm0, %xmm5 #34.3 - movaps %xmm1, 16(%r8) #34.3 - movaps %xmm2, 48(%r8) #34.3 - movaps %xmm4, 80(%r8) #34.3 - movaps %xmm5, 112(%r8) #34.3 - ret - -# _x8_soft + 6 needs to be 16 byte aligned -#ifdef __APPLE__ - .globl _x8_soft -_x8_soft: -#else - .globl x8_soft -x8_soft: -#endif - # rax, rcx, rdx, r8, r9, r10, r11 - # rbx, rsi - - # input - movq %r9, %rax - - # output - movq %r8, %rcx - - # loop stop (output + output_stride) - leaq (%r8, %rbx), %rdx - - # 3 * output_stride - leaq (%rbx, %rbx, 2), %rsi - - # 5 * output_stride - leaq (%rbx, %rbx, 4), %r10 - - # 7 * output_stride - leaq (%rsi, %rbx, 4), %r11 - -X8_soft_loop: - # input + 0 * input_stride - movaps (%rax), %xmm9 - - # output + 2 * output_stride - movaps (%rcx, %rbx, 2), %xmm6 - - movaps %xmm9, %xmm11 - - # output + 3 * output_stride - movaps (%rcx, %rsi), %xmm7 - - # input + 1 * 
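
As the comments note, the pointer setup at the top of x8_soft builds the odd multiples of the output stride out of single lea instructions, using the base + index*scale addressing forms; in C terms (illustrative only):

    #include <stdint.h>

    /* how x8_soft derives 3x/5x/7x the output stride (rbx) with
       one lea each, no multiplies needed */
    static void
    x8_strides(uintptr_t stride, uintptr_t *s3, uintptr_t *s5, uintptr_t *s7)
    {
        *s3 = stride + 2 * stride;  /* leaq (%rbx,%rbx,2), %rsi */
        *s5 = stride + 4 * stride;  /* leaq (%rbx,%rbx,4), %r10 */
        *s7 = *s3 + 4 * stride;     /* leaq (%rsi,%rbx,4), %r11 */
    }
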
input_stride - movaps 16(%rax), %xmm8 - - mulps %xmm6, %xmm11 - mulps %xmm7, %xmm9 - shufps $177, %xmm6, %xmm6 - mulps %xmm8, %xmm6 - shufps $177, %xmm7, %xmm7 - subps %xmm6, %xmm11 - mulps %xmm7, %xmm8 - movaps %xmm11, %xmm10 - addps %xmm8, %xmm9 - - # input + 2 * input_stride - movaps 32(%rax), %xmm15 - - addps %xmm9, %xmm10 - subps %xmm9, %xmm11 - - # output + 0 * output_stride - movaps (%rcx), %xmm5 - - movaps %xmm15, %xmm6 - - # output + 4 * output_stride - movaps (%rcx, %rbx, 4), %xmm12 - - movaps %xmm5, %xmm2 - - # output + 6 * output_stride - movaps (%rcx, %rsi, 2), %xmm13 - - xorps %xmm3, %xmm11 #const - - # input + 3 * input_stride - movaps 48(%rax), %xmm14 - - subps %xmm10, %xmm2 - mulps %xmm12, %xmm6 - addps %xmm10, %xmm5 - mulps %xmm13, %xmm15 - - # input + 4 * input_stride - movaps 64(%rax), %xmm10 - - movaps %xmm5, %xmm0 - shufps $177, %xmm12, %xmm12 - shufps $177, %xmm13, %xmm13 - mulps %xmm14, %xmm12 - mulps %xmm13, %xmm14 - subps %xmm12, %xmm6 - addps %xmm14, %xmm15 - - # output + 5 * output_stride - movaps (%rcx, %r10), %xmm7 - - movaps %xmm10, %xmm13 - - # output + 7 * output_stride - movaps (%rcx, %r11), %xmm8 - - movaps %xmm6, %xmm12 - - # input + 5 * input_stride - movaps 80(%rax), %xmm9 - - # input + 6 * input_stride - addq $96, %rax - - mulps %xmm7, %xmm13 - subps %xmm15, %xmm6 - addps %xmm15, %xmm12 - mulps %xmm8, %xmm10 - subps %xmm12, %xmm0 - addps %xmm12, %xmm5 - shufps $177, %xmm7, %xmm7 - xorps %xmm3, %xmm6 #const - shufps $177, %xmm8, %xmm8 - movaps %xmm2, %xmm12 - mulps %xmm9, %xmm7 - mulps %xmm8, %xmm9 - subps %xmm7, %xmm13 - addps %xmm9, %xmm10 - - # output + 1 * output_stride - movaps (%rcx, %rbx), %xmm4 - - shufps $177, %xmm11, %xmm11 - movaps %xmm4, %xmm1 - shufps $177, %xmm6, %xmm6 - addps %xmm11, %xmm1 - subps %xmm11, %xmm4 - addps %xmm6, %xmm12 - subps %xmm6, %xmm2 - movaps %xmm13, %xmm11 - movaps %xmm4, %xmm14 - movaps %xmm1, %xmm6 - subps %xmm10, %xmm13 - addps %xmm10, %xmm11 - xorps %xmm3, %xmm13 #const - addps %xmm11, %xmm4 - subps %xmm11, %xmm14 - shufps $177, %xmm13, %xmm13 - - # output + 0 * output_stride - movaps %xmm5, (%rcx) - - # output + 1 * output_stride - movaps %xmm4, (%rcx, %rbx) - - # output + 2 * output_stride - movaps %xmm2, (%rcx, %rbx, 2) - - subps %xmm13, %xmm1 - addps %xmm13, %xmm6 - - # output + 3 * output_stride - movaps %xmm1, (%rcx, %rsi) - - # output + 4 * output_stride - movaps %xmm0, (%rcx, %rbx, 4) - - # output + 5 * output_stride - movaps %xmm14, (%rcx, %r10) - - # output + 6 * output_stride - movaps %xmm12, (%rcx, %rsi, 2) - - # output + 7 * output_stride - movaps %xmm6, (%rcx, %r11) - - # output + 8 * output_stride - addq $16, %rcx - - cmpq %rdx, %rcx - jne X8_soft_loop - ret - -#ifdef __APPLE__ - .globl _x8_soft_end -_x8_soft_end: -#else - .globl x8_soft_end -x8_soft_end: - -#ifdef __APPLE__ - .globl _sse_leaf_ee_offsets - .globl _sse_leaf_oo_offsets - .globl _sse_leaf_eo_offsets - .globl _sse_leaf_oe_offsets - .align 4 -_sse_leaf_ee_offsets: - .long LEAF_EE_const_0-_leaf_ee+0x4 - .long LEAF_EE_const_1-_leaf_ee+0x5 - .long LEAF_EE_const_2-_leaf_ee+0x5 - .long LEAF_EE_const_3-_leaf_ee+0x5 - .long LEAF_EE_const_4-_leaf_ee+0x5 - .long LEAF_EE_const_5-_leaf_ee+0x5 - .long LEAF_EE_const_6-_leaf_ee+0x4 - .long LEAF_EE_const_7-_leaf_ee+0x5 -_sse_leaf_oo_offsets: - .long LEAF_OO_const_0-_leaf_oo+0x4 - .long LEAF_OO_const_1-_leaf_oo+0x4 - .long LEAF_OO_const_2-_leaf_oo+0x5 - .long LEAF_OO_const_3-_leaf_oo+0x5 - .long LEAF_OO_const_4-_leaf_oo+0x4 - .long LEAF_OO_const_5-_leaf_oo+0x5 - .long LEAF_OO_const_6-_leaf_oo+0x5 - 
.long LEAF_OO_const_7-_leaf_oo+0x5 -_sse_leaf_eo_offsets: - .long LEAF_EO_const_0-_leaf_eo+0x5 - .long LEAF_EO_const_1-_leaf_eo+0x4 - .long LEAF_EO_const_2-_leaf_eo+0x4 - .long LEAF_EO_const_3-_leaf_eo+0x4 - .long LEAF_EO_const_4-_leaf_eo+0x5 - .long LEAF_EO_const_5-_leaf_eo+0x5 - .long LEAF_EO_const_6-_leaf_eo+0x4 - .long LEAF_EO_const_7-_leaf_eo+0x5 -_sse_leaf_oe_offsets: - .long LEAF_OE_const_0-_leaf_oe+0x5 - .long LEAF_OE_const_1-_leaf_oe+0x4 - .long LEAF_OE_const_2-_leaf_oe+0x4 - .long LEAF_OE_const_3-_leaf_oe+0x5 - .long LEAF_OE_const_4-_leaf_oe+0x5 - .long LEAF_OE_const_5-_leaf_oe+0x5 - .long LEAF_OE_const_6-_leaf_oe+0x4 - .long LEAF_OE_const_7-_leaf_oe+0x4 -#else - .globl sse_leaf_ee_offsets - .globl sse_leaf_oo_offsets - .globl sse_leaf_eo_offsets - .globl sse_leaf_oe_offsets - .align 4 -sse_leaf_ee_offsets: - .long LEAF_EE_const_0-leaf_ee+0x4 - .long LEAF_EE_const_1-leaf_ee+0x5 - .long LEAF_EE_const_2-leaf_ee+0x5 - .long LEAF_EE_const_3-leaf_ee+0x5 - .long LEAF_EE_const_4-leaf_ee+0x5 - .long LEAF_EE_const_5-leaf_ee+0x5 - .long LEAF_EE_const_6-leaf_ee+0x4 - .long LEAF_EE_const_7-leaf_ee+0x5 -sse_leaf_oo_offsets: - .long LEAF_OO_const_0-leaf_oo+0x4 - .long LEAF_OO_const_1-leaf_oo+0x4 - .long LEAF_OO_const_2-leaf_oo+0x5 - .long LEAF_OO_const_3-leaf_oo+0x5 - .long LEAF_OO_const_4-leaf_oo+0x4 - .long LEAF_OO_const_5-leaf_oo+0x5 - .long LEAF_OO_const_6-leaf_oo+0x5 - .long LEAF_OO_const_7-leaf_oo+0x5 -sse_leaf_eo_offsets: - .long LEAF_EO_const_0-leaf_eo+0x5 - .long LEAF_EO_const_1-leaf_eo+0x4 - .long LEAF_EO_const_2-leaf_eo+0x4 - .long LEAF_EO_const_3-leaf_eo+0x4 - .long LEAF_EO_const_4-leaf_eo+0x5 - .long LEAF_EO_const_5-leaf_eo+0x5 - .long LEAF_EO_const_6-leaf_eo+0x4 - .long LEAF_EO_const_7-leaf_eo+0x5 -sse_leaf_oe_offsets: - .long LEAF_OE_const_0-leaf_oe+0x5 - .long LEAF_OE_const_1-leaf_oe+0x4 - .long LEAF_OE_const_2-leaf_oe+0x4 - .long LEAF_OE_const_3-leaf_oe+0x5 - .long LEAF_OE_const_4-leaf_oe+0x5 - .long LEAF_OE_const_5-leaf_oe+0x5 - .long LEAF_OE_const_6-leaf_oe+0x4 - .long LEAF_OE_const_7-leaf_oe+0x4 -#endif - -#ifdef __APPLE__ - .data -#else - .section .data -#endif - .p2align 4 -#ifdef __APPLE__ - .globl _sse_constants -_sse_constants: -#else - .globl sse_constants -sse_constants: -#endif - .long 0x00000000,0x80000000,0x00000000,0x80000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0xbf3504f3,0x3f3504f3 -#ifdef __APPLE__ - .globl _sse_constants_inv -_sse_constants_inv: -#else - .globl sse_constants_inv -sse_constants_inv: -#endif - .long 0x80000000,0x00000000,0x80000000,0x00000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0x3f3504f3,0xbf3504f3 -- cgit v1.1 From deb54fd909ce5dcb2a74c33ffa05ee54500a5aa1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 18 Mar 2015 14:13:26 +0200 Subject: Always run-time generate x64 dynamic code --- src/codegen_sse.h | 2176 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 1335 insertions(+), 841 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index 558a015..c518481 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -1,33 +1,33 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. 
Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ @@ -40,66 +40,36 @@ #include #include -#ifdef SSE_DEFINE_CONSTANTS static const FFTS_ALIGN(16) unsigned int sse_constants[20] = { - /* 0.0, -0.0, 0.0, -0.0 */ - 0x00000000, 0x80000000, 0x00000000, 0x80000000, - /* 0.707, 0.707, 0.707, 0.707 */ - 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, - /* -0.707, 0.707, -0.707, 0.707 */ - 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, - /* 1.0, 1.0, 0.707, 0.707 */ - 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, - /* 0.0, 0.0, -.707, 0.707 */ - 0x00000000, 0x00000000, 0xbf3504f3, 0x3f3504f3 + /* 0.0, -0.0, 0.0, -0.0 */ + 0x00000000, 0x80000000, 0x00000000, 0x80000000, + /* 0.707, 0.707, 0.707, 0.707 */ + 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, + /* -0.707, 0.707, -0.707, 0.707 */ + 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, + /* 1.0, 1.0, 0.707, 0.707 */ + 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, + /* 0.0, 0.0, -.707, 0.707 */ + 0x00000000, 0x00000000, 0xbf3504f3, 0x3f3504f3 }; static const FFTS_ALIGN(16) unsigned int sse_constants_inv[20] = { - /* -0.0, 0.0, -0.0, 0.0 */ - 0x80000000, 0x00000000, 0x80000000, 0x00000000, - /* 0.707, 0.707, 0.707, 0.707 */ + /* -0.0, 0.0, -0.0, 0.0 */ + 0x80000000, 0x00000000, 0x80000000, 0x00000000, + /* 0.707, 0.707, 0.707, 0.707 */ 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, 0x3f3504f3, - /* 0.707, -0.707, 0.707, -0.707 */ + /* 0.707, -0.707, 0.707, -0.707 */ 0x3f3504f3, 0xbf3504f3, 0x3f3504f3, 0xbf3504f3, - /* 1.0, 1.0, 0.707, 0.707 */ + /* 1.0, 1.0, 0.707, 0.707 */ 0x3f800000, 0x3f800000, 0x3f3504f3, 0x3f3504f3, - /* 0.0, 0.0, 0.707, -0.707 */ + /* 0.0, 0.0, 0.707, -0.707 */ 0x00000000, 0x00000000, 0x3f3504f3, 0xbf3504f3 }; -#else -extern void leaf_ee_init(); -extern void leaf_ee(); -extern void leaf_eo(); -extern void leaf_oe(); -extern void leaf_oo(); -extern void leaf_end(); - -extern void sse_constants(); -extern void sse_constants_inv(); - -extern const uint32_t sse_leaf_ee_offsets[8]; -extern const uint32_t sse_leaf_eo_offsets[8]; -extern const uint32_t sse_leaf_oe_offsets[8]; -extern const uint32_t sse_leaf_oo_offsets[8]; - -extern void x_init(); -extern void x4(); -extern void x8_soft(); -extern void x8_soft_end(); -#endif #define P(x) (*(*p)++ = x) -static void IMM32_NI(uint8_t *p, int32_t imm) -{ - int i; - - for (i = 0; i < 4; i++) { - *(p+i) = (imm & (0xff << (8 * i))) >> (8 * i); - } -} - -static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) +static FFTS_INLINE void +ffts_insert_nops(uint8_t **p, uint32_t count) { if (count >= 9) { P(0x66); @@ -175,36 +145,38 @@ static FFTS_INLINE void ffts_insert_nops(uint8_t **p, uint32_t count) } } -static FFTS_INLINE void ffts_align_mem16(uint8_t **p, uint32_t offset) +static FFTS_INLINE void +ffts_align_mem16(uint8_t **p, uint32_t offset) { int r = (16 - (offset & 0xf)) - (int) ((uintptr_t)(*p) & 0xf); r = (16 + r) & 0xf; ffts_insert_nops(p, r); } -static FFTS_INLINE void generate_epilogue(insns_t **fp) +static FFTS_INLINE void +generate_epilogue(insns_t **fp) { #ifdef _M_X64 /* restore nonvolatile registers */ - x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, -64, 8); - x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, -56, 8); - - x64_sse_movaps_reg_membase(*fp, X64_XMM6, X64_RSP, -48); - x64_sse_movaps_reg_membase(*fp, X64_XMM7, X64_RSP, -32); - x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RSP, -16); - x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RSP, 0); - x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RSP, 16); - x64_sse_movaps_reg_membase(*fp, X64_XMM11, X64_RSP, 32); - x64_sse_movaps_reg_membase(*fp, X64_XMM12, X64_RSP, 48); - 
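
The first row of the sse_constants table above, {0.0, -0.0, 0.0, -0.0}, is a sign mask: wherever the generated code says "change sign" it xorps a register against it, flipping only the sign bit of the odd lanes -- negating the imaginary halves of two packed complex floats in one instruction (the 0x3f3504f3 entries are 0.70710677f, i.e. cos(pi/4)). In intrinsics terms (illustrative sketch, not code from this patch):

    #include <xmmintrin.h>

    /* negate lanes 1 and 3 only: xorps against -0.0f flips just
       the sign bit and leaves the other lanes untouched */
    static __m128
    negate_odd_lanes(__m128 v)
    {
        const __m128 mask = _mm_set_ps(-0.0f, 0.0f, -0.0f, 0.0f);
        return _mm_xor_ps(v, mask);
    }
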
x64_sse_movaps_reg_membase(*fp, X64_XMM13, X64_RSP, 64); + x64_mov_reg_membase(*fp, X64_RBX, X64_RSP, -64, 8); + x64_mov_reg_membase(*fp, X64_RSI, X64_RSP, -56, 8); + + x64_sse_movaps_reg_membase(*fp, X64_XMM6, X64_RSP, -48); + x64_sse_movaps_reg_membase(*fp, X64_XMM7, X64_RSP, -32); + x64_sse_movaps_reg_membase(*fp, X64_XMM8, X64_RSP, -16); + x64_sse_movaps_reg_membase(*fp, X64_XMM9, X64_RSP, 0); + x64_sse_movaps_reg_membase(*fp, X64_XMM10, X64_RSP, 16); + x64_sse_movaps_reg_membase(*fp, X64_XMM11, X64_RSP, 32); + x64_sse_movaps_reg_membase(*fp, X64_XMM12, X64_RSP, 48); + x64_sse_movaps_reg_membase(*fp, X64_XMM13, X64_RSP, 64); /* restore the last 2 registers from the shadow space */ - x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RSP, 96); - x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RSP, 112); + x64_sse_movaps_reg_membase(*fp, X64_XMM14, X64_RSP, 96); + x64_sse_movaps_reg_membase(*fp, X64_XMM15, X64_RSP, 112); /* restore stack */ - x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 88, 8); -#else + x64_alu_reg_imm_size(*fp, X86_ADD, X64_RSP, 88, 8); +#else x64_pop_reg(*fp, X64_R15); x64_pop_reg(*fp, X64_R14); x64_pop_reg(*fp, X64_R13); @@ -218,12 +190,13 @@ static FFTS_INLINE void generate_epilogue(insns_t **fp) x64_ret(*fp); } -static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) +static FFTS_INLINE insns_t* +generate_prologue(insns_t **fp, ffts_plan_t *p) { insns_t *start; - - /* unreferenced parameter */ - (void) p; + + /* unreferenced parameter */ + (void) p; /* align call destination */ ffts_align_mem16(fp, 0); @@ -232,740 +205,1151 @@ static FFTS_INLINE insns_t* generate_prologue(insns_t **fp, ffts_plan_t *p) /* save nonvolatile registers */ #ifdef _M_X64 /* reserve space to save XMM6-XMM15 registers */ - x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 88, 8); - - x64_mov_membase_reg(*fp, X64_RSP, -64, X64_RBX, 8); - x64_mov_membase_reg(*fp, X64_RSP, -56, X64_RSI, 8); - - x64_sse_movaps_membase_reg(*fp, X64_RSP, -48, X64_XMM6); - x64_sse_movaps_membase_reg(*fp, X64_RSP, -32, X64_XMM7); - x64_sse_movaps_membase_reg(*fp, X64_RSP, -16, X64_XMM8); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 0, X64_XMM9); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 16, X64_XMM10); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 32, X64_XMM11); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 48, X64_XMM12); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 64, X64_XMM13); - - /* use the shadow space to save last 2 registers */ - x64_sse_movaps_membase_reg(*fp, X64_RSP, 96, X64_XMM14); - x64_sse_movaps_membase_reg(*fp, X64_RSP, 112, X64_XMM15); + x64_alu_reg_imm_size(*fp, X86_SUB, X64_RSP, 88, 8); + + x64_mov_membase_reg(*fp, X64_RSP, -64, X64_RBX, 8); + x64_mov_membase_reg(*fp, X64_RSP, -56, X64_RSI, 8); + + x64_sse_movaps_membase_reg(*fp, X64_RSP, -48, X64_XMM6); + x64_sse_movaps_membase_reg(*fp, X64_RSP, -32, X64_XMM7); + x64_sse_movaps_membase_reg(*fp, X64_RSP, -16, X64_XMM8); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 0, X64_XMM9); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 16, X64_XMM10); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 32, X64_XMM11); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 48, X64_XMM12); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 64, X64_XMM13); + + /* use the shadow space to save last 2 registers */ + x64_sse_movaps_membase_reg(*fp, X64_RSP, 96, X64_XMM14); + x64_sse_movaps_membase_reg(*fp, X64_RSP, 112, X64_XMM15); #else - x64_push_reg(*fp, X64_RBP); - x64_push_reg(*fp, X64_RBX); - x64_push_reg(*fp, X64_R10); - x64_push_reg(*fp, X64_R11); - x64_push_reg(*fp, X64_R12); - 
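
For reference, the frame that generate_prologue sets up under _M_X64 (and that the epilogue above unwinds) looks like this, relative to rsp after the sub rsp, 88. XMM6-XMM15, RBX and RSI are nonvolatile in the Microsoft x64 convention, and the caller's mandatory 32-byte shadow space sits just above the return address, which is why xmm14/xmm15 can be parked there without reserving extra room:

    /*
     *   rsp+112, rsp+96  : xmm15, xmm14   (caller's shadow space)
     *   rsp+88           : return address
     *   rsp+0 .. rsp+64  : xmm9 .. xmm13  (16-byte slots)
     *   rsp-16 .. rsp-48 : xmm8, xmm7, xmm6
     *   rsp-56, rsp-64   : rsi, rbx
     */
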
x64_push_reg(*fp, X64_R13); - x64_push_reg(*fp, X64_R14); - x64_push_reg(*fp, X64_R15); + x64_push_reg(*fp, X64_RBP); + x64_push_reg(*fp, X64_RBX); + x64_push_reg(*fp, X64_R10); + x64_push_reg(*fp, X64_R11); + x64_push_reg(*fp, X64_R12); + x64_push_reg(*fp, X64_R13); + x64_push_reg(*fp, X64_R14); + x64_push_reg(*fp, X64_R15); #endif return start; } -static FFTS_INLINE void generate_transform_init(insns_t **fp) +static FFTS_INLINE void +generate_transform_init(insns_t **fp) { #ifdef _M_X64 /* generate function */ - x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_RSI, 0); + x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_RSI, 0); /* set "pointer" to twiddle factors */ - x64_mov_reg_membase(*fp, X64_R9, X64_RCX, 0x20, 8); + x64_mov_reg_membase(*fp, X64_R9, X64_RCX, 0x20, 8); #else - size_t len; + /* generate function */ + x64_sse_movaps_reg_membase(*fp, X64_XMM3, X64_R9, 0); - /* copy function */ - assert((char*) x4 > (char*) x_init); - len = (char*) x4 - (char*) x_init; - memcpy(*fp, x_init, len); - *fp += len; + /* set "pointer" to twiddle factors */ + x64_mov_reg_membase(*fp, X64_R8, X64_RDI, 0x20, 8); #endif } -static FFTS_INLINE insns_t* generate_size4_base_case(insns_t **fp, int sign) +static FFTS_INLINE insns_t* +generate_size4_base_case(insns_t **fp, int sign) { - insns_t *ins; + insns_t *ins; insns_t *x4_addr; -#ifndef _M_X64 - size_t len; -#endif - /* unreferenced parameter */ - (void) sign; + /* unreferenced parameter */ + (void) sign; - /* to avoid deferring */ - ins = *fp; + /* to avoid deferring */ + ins = *fp; /* align call destination */ ffts_align_mem16(&ins, 0); x4_addr = ins; #ifdef _M_X64 - /* generate function */ - x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R8, 64); - x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 96); - x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_R8, 0); - x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R9, 0); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); - x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R9, 16); - x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0); - x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2); - x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2); - x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); - x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); - x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112); - x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5); - x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5); - x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6); - x64_sse_movaps_membase_reg(ins, X64_R8, 0, X64_XMM7); - x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8); - x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9); - x64_sse_movaps_membase_reg(ins, X64_R8, 96, X64_XMM10); - x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R9, 32); - x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80); - x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); - x64_sse_movaps_reg_membase(ins, 
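
The mulps / shufps 0xB1 / mulps / subps+addps sequence that opens the x4 body is the standard SSE packed complex multiply: the LUT (as set up earlier in this series) stores the twiddle's real parts and imaginary parts splatted into separate vectors, and shufps with immediate 0xB1 swaps the real/imaginary halves of each pair. An equivalent intrinsics sketch (illustrative; the generated code interleaves two of these at once):

    #include <xmmintrin.h>

    /* x holds two packed complex floats (re0, im0, re1, im1);
       w_re/w_im hold the twiddle's real and imaginary parts splatted */
    static __m128
    cmul(__m128 x, __m128 w_re, __m128 w_im)
    {
        __m128 t = _mm_shuffle_ps(x, x, 0xB1);   /* swap re/im pairs */
        const __m128 sign = _mm_set_ps(0.0f, -0.0f, 0.0f, -0.0f);

        /* per pair: (re*wr - im*wi, im*wr + re*wi) */
        return _mm_add_ps(_mm_mul_ps(x, w_re),
                          _mm_xor_ps(_mm_mul_ps(t, w_im), sign));
    }
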
X64_XMM13, X64_R9, 48); - x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11); - x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM13); - x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13); - x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); - - x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16); - x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2); - x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); - x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0); - x64_sse_movaps_membase_reg(ins, X64_R8, 16, X64_XMM1); - x64_sse_movaps_membase_reg(ins, X64_R8, 48, X64_XMM2); - x64_sse_movaps_membase_reg(ins, X64_R8, 80, X64_XMM4); - x64_sse_movaps_membase_reg(ins, X64_R8, 112, X64_XMM5); - x64_ret(ins); + /* generate function */ + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R8, 64); + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 96); + x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_R8, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R9, 0); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R9, 16); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R8, 32); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R8, 112); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6); + x64_sse_movaps_membase_reg(ins, X64_R8, 0, X64_XMM7); + x64_sse_movaps_membase_reg(ins, X64_R8, 32, X64_XMM8); + x64_sse_movaps_membase_reg(ins, X64_R8, 64, X64_XMM9); + x64_sse_movaps_membase_reg(ins, X64_R8, 96, X64_XMM10); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R9, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R8, 80); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R9, 48); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, 
X64_XMM12, X64_XMM12, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM13); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); + + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_R8, 16); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 48); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0); + x64_sse_movaps_membase_reg(ins, X64_R8, 16, X64_XMM1); + x64_sse_movaps_membase_reg(ins, X64_R8, 48, X64_XMM2); + x64_sse_movaps_membase_reg(ins, X64_R8, 80, X64_XMM4); + x64_sse_movaps_membase_reg(ins, X64_R8, 112, X64_XMM5); + x64_ret(ins); #else - /* copy function */ - assert((char*) x8_soft > (char*) x4); - len = (char*) x8_soft - (char*) x4; - memcpy(ins, x4, len); - ins += len; + /* generate function */ + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_RDX, 64); + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_RDX, 96); + x64_sse_movaps_reg_membase(ins, X64_XMM7, X64_RDX, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM4, X64_R8, 0); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_R8, 16); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM1, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM2, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM0); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RDX, 32); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RDX, 112); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM6); + x64_sse_movaps_membase_reg(ins, X64_RDX, 0, X64_XMM7); + x64_sse_movaps_membase_reg(ins, X64_RDX, 32, X64_XMM8); + x64_sse_movaps_membase_reg(ins, X64_RDX, 64, X64_XMM9); + x64_sse_movaps_membase_reg(ins, X64_RDX, 96, X64_XMM10); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_R8, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RDX, 80); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R8, 48); + x64_sse_mulps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM11, 
X64_XMM13); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM0, X64_XMM3); + + x64_sse_movaps_reg_membase(ins, X64_XMM1, X64_RDX, 16); + x64_sse_movaps_reg_membase(ins, X64_XMM2, X64_RDX, 48); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM0, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM0); + x64_sse_movaps_membase_reg(ins, X64_RDX, 16, X64_XMM1); + x64_sse_movaps_membase_reg(ins, X64_RDX, 48, X64_XMM2); + x64_sse_movaps_membase_reg(ins, X64_RDX, 80, X64_XMM4); + x64_sse_movaps_membase_reg(ins, X64_RDX, 112, X64_XMM5); + x64_ret(ins); #endif - *fp = ins; + *fp = ins; return x4_addr; } -static FFTS_INLINE void generate_leaf_init(insns_t **fp, uint32_t loop_count) +static FFTS_INLINE void +generate_leaf_init(insns_t **fp, uint32_t loop_count) { -#ifndef _M_X64 - size_t len; -#endif - - /* to avoid deferring */ - insns_t *ins = *fp; + /* to avoid deferring */ + insns_t *ins = *fp; #ifdef _M_X64 - /* set loop counter */ - x86_mov_reg_imm(ins, X86_EBX, loop_count); + /* set loop counter */ + x86_mov_reg_imm(ins, X86_EBX, loop_count); - /* generate function */ + /* generate function */ /* clear */ - x86_clear_reg(ins, X86_EAX); + x86_clear_reg(ins, X86_EAX); /* set "pointer" to offsets */ - x64_mov_reg_membase(ins, X64_R9, X64_RCX, 0x0, 8); + x64_mov_reg_membase(ins, X64_R9, X64_RCX, 0x0, 8); /* set "pointer" to constants */ - x64_mov_reg_membase(ins, X64_RSI, X64_RCX, 0xE0, 8); - - /* use XMM3 for sign change */ - x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); + x64_mov_reg_membase(ins, X64_RSI, X64_RCX, 0xE0, 8); + + /* use XMM3 for sign change */ + x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); #else - /* set loop counter */ - x86_mov_reg_imm(ins, X86_ECX, loop_count); + /* set loop counter */ + x86_mov_reg_imm(ins, X86_ECX, loop_count); - /* copy function */ - assert((char*) leaf_ee > (char*) leaf_ee_init); - len = (char*) leaf_ee - (char*) leaf_ee_init; - memcpy(ins, leaf_ee_init, (size_t) len); - ins += len; + /* generate function */ - /* align loop/jump destination */ + /* clear */ + x86_clear_reg(ins, X86_EAX); + + /* set "pointer" to offsets */ + x64_mov_reg_membase(ins, X64_R8, X64_RDI, 0x0, 8); + + /* set "pointer" to constants */ + x64_mov_reg_membase(ins, X64_R9, X64_RDI, 0xE0, 8); + + /* align loop/jump destination */ ffts_align_mem16(&ins, 9); #endif - *fp = ins; + *fp = ins; } -static FFTS_INLINE void generate_leaf_ee(insns_t **fp, uint32_t *offsets, int extend) +static FFTS_INLINE void +generate_leaf_ee(insns_t **fp, uint32_t *offsets, int extend) { -#ifdef _M_X64 insns_t *leaf_ee_loop; -#else - size_t len; - int i; -#endif - /* to avoid deferring */ - insns_t *ins = *fp; + /* to avoid deferring */ + insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_RSI, 32, 1); + x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_RSI, 32, 1); - /* beginning of the loop (make sure it's 16 byte aligned) */ + /* beginning of the loop (make sure it's 16 byte aligned) */ 
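
generate_leaf_ee keeps the loop entry 16-byte aligned (the assert just below checks it), and rather than padding with NOPs it stretches selected instructions: the x64_sse_movaps_reg_reg_size(..., extend > 0) calls presumably emit a one-byte-longer encoding of the same movaps while extend is positive, folding the alignment padding into useful work inside the hot loop. A hypothetical emitter showing the idea (not the bundled macro set; valid for xmm0-xmm7, where the REX prefix carries no information):

    #include <stdint.h>

    static void
    emit_movaps_xmm_xmm(uint8_t **p, int dst, int src, int stretch)
    {
        if (stretch) {
            *(*p)++ = 0x40;  /* redundant REX prefix: one byte longer,
                                identical semantics */
        }

        *(*p)++ = 0x0F;      /* movaps xmm_dst, xmm_src */
        *(*p)++ = 0x28;
        *(*p)++ = (uint8_t) (0xC0 | (dst << 3) | src);
    }
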
leaf_ee_loop = ins; assert(!(((uintptr_t) leaf_ee_loop) & 0xF)); - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[0], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[2], X64_RAX, 2); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[3], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); - x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); - - x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); - x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9); - x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[5], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); - x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[6], X64_RAX, 2); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[7], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM8); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); - extend--; - - x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); - x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); - x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); - x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 16); - x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0); - extend--; - - x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10); - x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); - x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); - x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM8); - - x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); - extend--; - - x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0); - extend--; - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM3); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0); - extend--; - - x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); - x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4); - x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM0); - x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM8); - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM12); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM8, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM8); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM8); - - 
/* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); - - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); - x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); - x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); - x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0); - extend--; - - x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0); - extend--; - - x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13); - x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM8); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM4); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM7); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM9); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM1); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM5); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM6); - - /* loop condition */ - x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8); - x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[2], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); + + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[6], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12); + 
x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 16); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0); + extend--; + + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM8); + + x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0); + extend--; + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0); + extend--; + + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM8, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM8); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0); + extend--; + + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0); + extend--; + + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM8); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM6); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4); #else - /* copy function */ - assert((char*) 
leaf_oo > (char*) leaf_ee); - len = (char*) leaf_oo - (char*) leaf_ee; - memcpy(ins, leaf_ee, (size_t) len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(ins + sse_leaf_ee_offsets[i], offsets[i]); - } + x64_sse_movaps_reg_membase_size(ins, X64_XMM0, X64_R9, 32, 1); - ins += len; -#endif + /* use XMM8 for sign change */ + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_R9, 0); - *fp = ins; -} + /* beginning of the loop (make sure it's 16 byte aligned) */ + leaf_ee_loop = ins; + assert(!(((uintptr_t) leaf_ee_loop) & 0xF)); -static FFTS_INLINE void generate_leaf_eo(insns_t **fp, uint32_t *offsets) -{ -#ifndef _M_X64 - size_t len; - int i; + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[2], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM9); + + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RSI, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM3, X64_RSI, offsets[6], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RSI, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + + x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM7, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R9, 16); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM5, extend > 0); + extend--; + + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM3); + + x64_sse_addps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM5, X64_XMM1, extend > 0); + extend--; + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM9, X64_XMM8); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM3, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM1, X64_XMM6, extend > 0); + extend--; + + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM0); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM4); + x64_sse_mulps_reg_reg(ins, 
X64_XMM3, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM9, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM3); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM3); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM12, X64_XMM8); + + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM6); + x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM3, X64_XMM4); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM4, 0xEE); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM4, X64_XMM1, extend > 0); + extend--; + + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM1, X64_XMM7, 0xEE); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM5, extend > 0); + extend--; + + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM9, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM6); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_ee_loop, 0, 4); #endif - /* to avoid deferring */ - insns_t *ins = *fp; + *fp = ins; +} + +static FFTS_INLINE void +generate_leaf_eo(insns_t **fp, uint32_t *offsets) +{ + /* to avoid deferring */ + insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); - x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); - x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2); - x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5); - x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); - x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); - - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM11, 
X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); - x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10); - x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM10); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM11); - x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[4], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[5], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15); - x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[6], X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12); - x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[7], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4); - x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13); - x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8); - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5); - x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3); - - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5); - x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14); - x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); - x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); - x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1); - x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 48); - x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11); - x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15); - x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 64); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); - x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7); - x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); - x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); - - x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM2); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, 
X64_R10, 2, X64_XMM2); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM0); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM11); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5); + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14); + x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_RSI, 48); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15); + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 64); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, 
X64_XMM7, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM0); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM12); #else - /* copy function */ - assert((char*) leaf_oe > (char*) leaf_eo); - len = (char*) leaf_oe - (char*) leaf_eo; - memcpy(ins, leaf_eo, len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(ins + sse_leaf_eo_offsets[i], offsets[i]); - } - - ins += len; + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RSI, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[1], X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_R9, 0); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM9, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM11); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RSI, offsets[4], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM15); + 
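The movsxd loads against R8 (R9 in the Win64 branch) pull sign-extended 32-bit entries out of the offset LUT, indexed by the loop counter in RAX with scale 2 (i.e. x4 bytes), and those entries in turn index the output buffer for the block stores. In C terms (names hypothetical):

  #include <stdint.h>
  #include <xmmintrin.h>

  /* one output-scatter step: movsxd rN, dword [lut + rax*4]
     followed by movaps [out + rN*4 + disp], xmm */
  static void scatter_row(float *out, const int32_t *lut,
                          uint64_t rax, __m128 v)
  {
      int64_t o = (int64_t)lut[rax];   /* sign-extended 32-bit LUT entry */
      _mm_store_ps(out + o, v);        /* 16-byte-aligned store */
  }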
x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[6], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RSI, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM7, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM2, X64_XMM8); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM5); + x64_sse_movlhps_reg_reg(ins, X64_XMM7, X64_XMM15); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM15, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM5); + x64_sse_subps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM14); + x64_sse_movlhps_reg_reg(ins, X64_XMM1, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM15, X64_XMM15, 0xB1); + x64_sse_movaps_reg_membase(ins, X64_XMM11, X64_R9, 48); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM11); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM15); + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_R9, 64); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM15); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM7); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM14, 0xEE); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); + + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM4, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM12); #endif - *fp = ins; + *fp = ins; } -static FFTS_INLINE void generate_leaf_oe(insns_t **fp, uint32_t *offsets) +static FFTS_INLINE void +generate_leaf_oe(insns_t **fp, uint32_t *offsets) { -#ifndef _M_X64 - size_t len; - int i; -#endif - - /* to avoid deferring */ - insns_t *ins = *fp; + /* to avoid deferring */ + insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RDX, offsets[2], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6); - 
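Both leaf emitters now carry two register conventions: on Win64 (_M_X64) the input pointer is RDX, the output base R8, the offset LUT R9 and the constants block RSI, while the new SysV path reads from RSI, writes through RDX, keeps the LUT in R8 and the constants in R9 (hence the extra R12 scratch). Before this change the non-Windows path was served by memcpy'ing precompiled template bodies and patching their displacement immediates with IMM32_NI; emitting the instructions directly, as the Win64 path already did, bakes offsets[] straight into the movaps displacements. Register roles as read from the emitted code (an inference, not documented in the patch):

  /* role          Win64 (_M_X64)   SysV (else)
   * input base    RDX              RSI
   * output base   R8               RDX
   * offset LUT    R9               R8
   * constants     RSI              R9
   * LUT scratch   R10, R11         R11, R12
   */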
x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4); - x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[0], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); - x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); - x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7); - x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); - x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); - x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE); - x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10); - x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); - x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM13); - x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RSI, 48); - x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13); - x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 64); - x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM14); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); - x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12); - x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[4], X64_RAX, 2); - x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4); - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[6], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM9); - x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RDX, offsets[7], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); - x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[5], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2); - x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM15); - x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); - - x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM0); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); - - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6); - 
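leaf_oe opens with `shufps ..., 0xE4` blends: with immediate 0xE4 (binary 11 10 01 00) the destination keeps its own low two floats and takes the source's high two, pairing the low complex of one register with the high complex of another. The complementary regrouping used at the ends of these blocks is movlhps plus `shufps ..., 0xEE`, which splits two registers of interleaved pairs into an all-lows and an all-highs register. A sketch with intrinsics:

  #include <xmmintrin.h>

  /* {a0,a1,b2,b3}: keep dst's low pair, take src's high pair (0xE4) */
  static __m128 blend_lo_hi(__m128 a, __m128 b)
  {
      return _mm_shuffle_ps(a, b, 0xE4);
  }

  /* regroup two registers of complex pairs into lows and highs:
     movlhps -> {a0,a1,b0,b1}, shufps 0xEE -> {a2,a3,b2,b3} */
  static void split_pairs(__m128 a, __m128 b, __m128 *lo, __m128 *hi)
  {
      *lo = _mm_movelh_ps(a, b);
      *hi = _mm_shuffle_ps(a, b, 0xEE);
  }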
x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM0); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM9, 0xEE); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4); - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM0); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM13); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13); - x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8); - x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM10); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM11); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM0); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM14); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_RSI, 48); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_RSI, 64); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM14); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1); + x64_sse_mulps_reg_reg(ins, 
X64_XMM4, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RDX, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM0, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM3); + + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM0); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); + + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM6); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM0); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM0, X64_XMM9, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM0); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM0); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM4); #else - /* copy function */ - assert((char*) leaf_end > (char*) leaf_oe); - len = (char*) leaf_end - (char*) leaf_oe; - memcpy(ins, leaf_oe, len); - - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(ins + sse_leaf_oe_offsets[i], offsets[i]); - } - - ins += len; + x64_sse_movaps_reg_membase(ins, X64_XMM0, X64_R9, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RSI, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RSI, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM6); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM8, 0xE4); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM6, 0xE4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2); + x64_sse_addps_reg_reg(ins, 
X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM14, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM12, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM10, 0xEE); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM0); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM5, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM10); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM9); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM12, 0xEE); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM10); + x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2); + x64_sse_movlhps_reg_reg(ins, X64_XMM13, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM13, X64_R9, 48); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM12, X64_R9, 64); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM5, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM14); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM5); + x64_sse_mulps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM4, X64_XMM4, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM4, X64_XMM12); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RSI, offsets[4], X64_RAX, 2); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM3, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM2, X64_RSI, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); + x64_sse_movaps_reg_memindex(ins, X64_XMM15, X64_RSI, offsets[5], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM2); + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM15); + x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM7, X64_XMM0); + + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM0); + + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM6); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM3); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM8, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM9, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM4); + 
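The mulps / shufps-0xB1 / mulps / subps (or addps) runs against the constants at [R9+48] and [R9+64] ([RSI+48]/[RSI+64] on Win64) are packed complex multiplies by a twiddle factor, with the twiddle's real and imaginary parts each broadcast into their own vector. A minimal sketch under an assumed constant layout (the generator's actual tables may fold the signs differently, which would explain the mix of subps and addps above):

  #include <xmmintrin.h>

  /* t * (c + di) with w_re = {c,c,c,c} and w_im = {-d,d,-d,d};
     folding the sign into w_im lets one mul/swap/mul/add
     produce {ac-bd, ad+bc} per complex pair */
  static __m128 cmul_twiddle(__m128 t, __m128 w_re, __m128 w_im)
  {
      __m128 p  = _mm_mul_ps(t, w_re);             /* {ac, bc, ...}    */
      __m128 ts = _mm_shuffle_ps(t, t, 0xB1);      /* swap re/im lanes */
      return _mm_add_ps(p, _mm_mul_ps(ts, w_im));  /* {ac-bd, bc+ad}   */
  }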
x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM3); + x64_sse_subps_reg_reg(ins, X64_XMM3, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_movlhps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM10); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM11); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM4); #endif - *fp = ins; + *fp = ins; } -static FFTS_INLINE void generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets, int extend) +static FFTS_INLINE void +generate_leaf_oo(insns_t **fp, uint32_t loop_count, uint32_t *offsets, int extend) { -#ifdef _M_X64 insns_t *leaf_oo_loop; -#else - size_t len; - int i; -#endif - /* to avoid deferring */ - insns_t *ins = *fp; + /* to avoid deferring */ + insns_t *ins = *fp; #ifdef _M_X64 - /* align loop/jump destination */ - x86_mov_reg_imm(ins, X86_EBX, loop_count); + /* align loop/jump destination */ + x86_mov_reg_imm(ins, X86_EBX, loop_count); - /* beginning of the loop (make sure it's 16 byte aligned) */ - leaf_oo_loop = ins; + /* beginning of the loop (make sure it's 16 byte aligned) */ + leaf_oo_loop = ins; assert(!(((uintptr_t) leaf_oo_loop) & 0xF)); - x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[0], X64_RAX, 2); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[2], X64_RAX, 2); - - x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); - extend--; - - x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); - x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RDX, offsets[4], X64_RAX, 2); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RDX, offsets[5], X64_RAX, 2); - - x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); - extend--; - - x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[6], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4); - x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[7], X64_RAX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); - x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); - x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); - x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12); - x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); - - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM9); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM3); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); - - x64_sse_movaps_reg_reg_size(ins, 
X64_XMM7, X64_XMM6, extend > 0); - extend--; - - x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); - x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10); - x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1); - x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); - - x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0); - extend--; - - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14); - x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM5); - x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); - x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15); - x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13); - x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM15, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM14); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM7); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM4); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM8); - x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM5); - x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM6); - x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM9); - x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM2); - - /* loop condition */ - x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8); - x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[0], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RDX, offsets[2], X64_RAX, 2); + + x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM1, X64_RDX, offsets[4], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM5, X64_XMM6, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RDX, offsets[5], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RDX, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RDX, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_movsxd_reg_memindex(ins, X64_R10, X64_R9, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM3); + + x64_sse_addps_reg_reg(ins, 
X64_XMM5, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM6, extend > 0); + extend--; + + x64_movsxd_reg_memindex(ins, X64_R11, X64_R9, 8, X64_RAX, 2); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); + + x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0); + extend--; + + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM5); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM5, X64_XMM15, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R10, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R10, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R10, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R10, 2, X64_XMM8); + x64_sse_movaps_memindex_reg(ins, X64_R8, 0, X64_R11, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R8, 16, X64_R11, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_R8, 32, X64_R11, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_R8, 48, X64_R11, 2, X64_XMM2); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RBX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4); #else - /* align loop/jump destination */ - x86_mov_reg_imm(ins, X86_ECX, loop_count); - ffts_align_mem16(&ins, 4); + /* align loop/jump destination */ + x86_mov_reg_imm(ins, X86_ECX, loop_count); + ffts_align_mem16(&ins, 4); - /* copy function */ - assert((char*) leaf_eo > (char*) leaf_oo); - len = (char*) leaf_eo - (char*) leaf_oo; - memcpy(ins, leaf_oo, len); + x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_R9, 0); - /* patch offsets */ - for (i = 0; i < 8; i++) { - IMM32_NI(ins + sse_leaf_oo_offsets[i], offsets[i]); - } + /* beginning of the loop (make sure it's 16 byte aligned) */ + leaf_oo_loop = ins; + assert(!(((uintptr_t) leaf_oo_loop) & 0xF)); - ins += len; + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RSI, offsets[0], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM6, X64_XMM4, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RSI, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM10, X64_RSI, offsets[2], X64_RAX, 2); + + x64_sse_addps_reg_reg_size(ins, X64_XMM6, X64_XMM7, extend > 0); + extend--; + + x64_sse_subps_reg_reg_size(ins, X64_XMM4, X64_XMM7, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RSI, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, 
X64_XMM1, X64_RSI, offsets[4], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM3, X64_XMM6, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM11, X64_RSI, offsets[5], X64_RAX, 2); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM2, X64_XMM1, extend > 0); + extend--; + + x64_sse_movaps_reg_memindex(ins, X64_XMM14, X64_RSI, offsets[6], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM15, X64_XMM4); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RSI, offsets[7], X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_movsxd_reg_memindex(ins, X64_R11, X64_R8, 0, X64_RAX, 2); + x64_sse_subps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_addps_reg_reg(ins, X64_XMM2, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM12); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM13, X64_XMM12); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM10, X64_XMM5); + + x64_sse_addps_reg_reg(ins, X64_XMM3, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM9); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM14, X64_XMM5); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM10, X64_XMM10, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM9, X64_XMM2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM14, X64_XMM14, 0xB1); + + x64_sse_movaps_reg_reg_size(ins, X64_XMM7, X64_XMM6, extend > 0); + extend--; + + x64_movsxd_reg_memindex(ins, X64_R12, X64_R8, 8, X64_RAX, 2); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM15, X64_XMM10); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM1); + x64_sse_movaps_reg_reg(ins, X64_XMM8, X64_XMM2); + + x64_sse_movlhps_reg_reg_size(ins, X64_XMM7, X64_XMM4, extend > 0); + extend--; + + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM14); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM14); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM4, 0xEE); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM3); + x64_sse_movaps_reg_reg(ins, X64_XMM4, X64_XMM9); + x64_sse_movlhps_reg_reg(ins, X64_XMM14, X64_XMM15); + x64_sse_movlhps_reg_reg(ins, X64_XMM4, X64_XMM13); + x64_sse_movlhps_reg_reg(ins, X64_XMM8, X64_XMM1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM3, X64_XMM15, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM9, X64_XMM13, 0xEE); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM2, X64_XMM1, 0xEE); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R11, 2, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R11, 2, X64_XMM7); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R11, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R11, 2, X64_XMM8); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 0, X64_R12, 2, X64_XMM3); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 16, X64_R12, 2, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 32, X64_R12, 2, X64_XMM9); + x64_sse_movaps_memindex_reg(ins, X64_RDX, 48, X64_R12, 2, X64_XMM2); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, leaf_oo_loop, 0, 4); #endif - *fp = ins; + *fp = ins; } -static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) +static FFTS_INLINE insns_t* +generate_size8_base_case(insns_t **fp, int sign) { - insns_t *ins; + insns_t *ins; insns_t *x8_addr; -#ifdef _M_X64 insns_t *x8_soft_loop; -#else 
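The looping leaves above (leaf_ee and leaf_oo, in both branches) lay down the same control skeleton: the trip count is materialized up front (EBX on Win64, ECX here), RAX walks the LUT four int32 entries per iteration, and a trailing cmp/jne falls back to the 16-byte-aligned loop head. Rendered as C (hypothetical names; the loop body is the straight-line SSE above):

  #include <stdint.h>

  void leaf_loop_shape(uint64_t loop_count, const int32_t *lut)
  {
      uint64_t rax = 0;
      do {
          int64_t o0 = (int64_t)lut[rax];      /* movsxd [lut + rax*4]     */
          int64_t o1 = (int64_t)lut[rax + 2];  /* movsxd [lut + 8 + rax*4] */
          (void)o0; (void)o1;                  /* ... loads, butterflies,  */
                                               /* stores at out+o0, out+o1 */
          rax += 4;                            /* add rax, 4               */
      } while (loop_count != rax);             /* cmp rcx, rax ; jne loop  */
  }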
- size_t len; -#endif - /* unreferenced parameter */ - (void) sign; + /* unreferenced parameter */ + (void) sign; - /* to avoid deferring */ - ins = *fp; + /* to avoid deferring */ + ins = *fp; /* align call destination */ ffts_align_mem16(&ins, 0); @@ -982,193 +1366,303 @@ static FFTS_INLINE insns_t* generate_size8_base_case(insns_t **fp, int sign) /* generate function */ /* input */ - x64_mov_reg_reg(ins, X64_RAX, X64_R9, 8); + x64_mov_reg_reg(ins, X64_RAX, X64_R9, 8); /* output */ - x64_mov_reg_reg(ins, X64_RCX, X64_R8, 8); + x64_mov_reg_reg(ins, X64_RCX, X64_R8, 8); /* loop stop (RDX = output + output_stride) */ - x64_lea_memindex(ins, X64_RDX, X64_R8, 0, X64_RBX, 0); + x64_lea_memindex(ins, X64_RDX, X64_R8, 0, X64_RBX, 0); /* RSI = 3 * output_stride */ - x64_lea_memindex(ins, X64_RSI, X64_RBX, 0, X64_RBX, 1); + x64_lea_memindex(ins, X64_RSI, X64_RBX, 0, X64_RBX, 1); /* R10 = 5 * output_stride */ - x64_lea_memindex(ins, X64_R10, X64_RBX, 0, X64_RBX, 2); + x64_lea_memindex(ins, X64_R10, X64_RBX, 0, X64_RBX, 2); /* R11 = 7 * output_stride */ - x64_lea_memindex(ins, X64_R11, X64_RSI, 0, X64_RBX, 2); + x64_lea_memindex(ins, X64_R11, X64_RSI, 0, X64_RBX, 2); - /* beginning of the loop (make sure it's 16 byte aligned) */ + /* beginning of the loop (make sure it's 16 byte aligned) */ x8_soft_loop = ins; assert(!(((uintptr_t) x8_soft_loop) & 0xF)); /* load [input + 0 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 0); + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 0); /* load [output + 2 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RCX, 0, X64_RBX, 1); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_RCX, 0, X64_RBX, 1); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); /* load [output + 3 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_RSI, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_RSI, 0); /* load [input + 1 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RAX, 16); - - x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6); - x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); - x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7); - x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RAX, 16); + + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); /* load [input + 2 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RAX, 32); + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RAX, 32); - x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); - x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); /* load [output + 0 * output_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RCX, 0); + 
x64_sse_movaps_reg_membase(ins, X64_XMM5, X64_RCX, 0); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15); /* load [output + 4 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RCX, 0, X64_RBX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_RCX, 0, X64_RBX, 2); - x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); /* load [output + 6 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RCX, 0, X64_RSI, 1); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_RCX, 0, X64_RSI, 1); - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); /* load [input + 3 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RAX, 48); + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RAX, 48); - x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10); - x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10); - x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13); /* load [input + 4 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RAX, 64); + x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RAX, 64); - x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); - x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); - x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); - /* load [output + 5 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_R10, 0); + /* load [output + 5 * output_stride] */ + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RCX, 0, X64_R10, 0); - x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10); - /* load [output + 7 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RCX, 0, X64_R11, 0); + /* load [output + 7 * output_stride] */ + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_RCX, 0, X64_R11, 0); - x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6); /* load [input + 5 * input_stride] */ - x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 80); + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RAX, 80); /* move input by 6 * input_stride */ - x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 0x60, 8); - - x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7); - x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15); - x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15); - x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8); - x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12); - x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); - - /* change sign */ 
- x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2); - x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9); - x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7); - x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 0x60, 8); + + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); /* load [output + 1 * output_stride] */ - x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RCX, 0, X64_RBX, 0); - - x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); - x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); - x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11); - x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6); - x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6); - x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13); - x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); - x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1); - x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10); - x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); - - /* change sign */ - x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); - - x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11); - x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11); - x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RCX, 0, X64_RBX, 0); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); + + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); /* store [output + 0 * output_stride] */ - x64_sse_movaps_membase_reg(ins, X64_RCX, 0, X64_XMM5); + x64_sse_movaps_membase_reg(ins, X64_RCX, 0, X64_XMM5); /* store [output + 1 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 0, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 0, X64_XMM4); /* store [output + 2 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, 
X64_RBX, 1, X64_XMM2); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 1, X64_XMM2); - x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13); - x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13); /* store [output + 3 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 0, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 0, X64_XMM1); /* store [output + 4 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 2, X64_XMM0); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RBX, 2, X64_XMM0); /* store [output + 5 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R10, 0, X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R10, 0, X64_XMM14); /* store [output + 6 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 1, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_RSI, 1, X64_XMM12); /* store [output + 7 * output_stride] */ - x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R11, 0, X64_XMM6); + x64_sse_movaps_memindex_reg(ins, X64_RCX, 0, X64_R11, 0, X64_XMM6); /* move output by 16 */ - x64_alu_reg_imm_size(ins, X86_ADD, X64_RCX, 16, 8); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RCX, 16, 8); /* loop condition */ - x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RDX, 8); - x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4); + x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RDX, 8); + x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4); x64_ret(ins); #else - /* copy function */ - assert((char*) x8_soft_end >= (char*) x8_soft); - len = (char*) x8_soft_end - (char*) x8_soft; - memcpy(ins, x8_soft, len); - ins += len; + /* generate function */ + x86_clear_reg(ins, X86_EAX); + x64_mov_reg_reg(ins, X64_RBX, X64_RDX, 8); + x64_mov_reg_reg(ins, X64_RSI, X64_R8, 8); + + x64_lea_memindex(ins, X64_R9, X64_RDX, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R10, X64_R9, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R11, X64_R10, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R12, X64_R11, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R13, X64_R12, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R14, X64_R13, 0, X64_RCX, 2); + x64_lea_memindex(ins, X64_R15, X64_R14, 0, X64_RCX, 2); + + /* beginning of the loop (make sure it's 16 byte aligned) */ + x8_soft_loop = ins; + assert(!(((uintptr_t) x8_soft_loop) & 0xF)); + + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 0); + x64_sse_movaps_reg_memindex(ins, X64_XMM6, X64_R10, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_R11, 0, X64_RAX, 2); + x64_sse_movaps_reg_membase(ins, X64_XMM8, X64_RSI, 16); + x64_sse_mulps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM7); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM8); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM6); + x64_sse_mulps_reg_reg(ins, X64_XMM8, X64_XMM7); + x64_sse_movaps_reg_reg(ins, X64_XMM10, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_movaps_reg_membase(ins, X64_XMM15, X64_RSI, 32); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_sse_subps_reg_reg(ins, X64_XMM11, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RBX, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM15); + 
x64_sse_movaps_reg_memindex(ins, X64_XMM12, X64_R12, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM2, X64_XMM5); + x64_sse_movaps_reg_memindex(ins, X64_XMM13, X64_R14, 0, X64_RAX, 2); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM11, X64_XMM3); + + x64_sse_movaps_reg_membase(ins, X64_XMM14, X64_RSI, 48); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM10); + x64_sse_mulps_reg_reg(ins, X64_XMM15, X64_XMM13); + x64_sse_movaps_reg_membase(ins, X64_XMM10, X64_RSI, 64); + x64_sse_movaps_reg_reg(ins, X64_XMM0, X64_XMM5); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM12, X64_XMM12, 0xB1); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_mulps_reg_reg(ins, X64_XMM12, X64_XMM14); + x64_sse_mulps_reg_reg(ins, X64_XMM14, X64_XMM13); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM15, X64_XMM14); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_R13, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM13, X64_XMM10); + x64_sse_movaps_reg_memindex(ins, X64_XMM8, X64_R15, 0, X64_RAX, 2); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM6); + x64_sse_movaps_reg_membase(ins, X64_XMM9, X64_RSI, 80); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RSI, 0x60, 8); + x64_sse_mulps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_subps_reg_reg(ins, X64_XMM6, X64_XMM15); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM15); + x64_sse_mulps_reg_reg(ins, X64_XMM10, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM0, X64_XMM12); + x64_sse_addps_reg_reg(ins, X64_XMM5, X64_XMM12); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM7, X64_XMM7, 0xB1); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM6, X64_XMM3); + + x64_sse_shufps_reg_reg_imm(ins, X64_XMM8, X64_XMM8, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM12, X64_XMM2); + x64_sse_mulps_reg_reg(ins, X64_XMM7, X64_XMM9); + x64_sse_mulps_reg_reg(ins, X64_XMM9, X64_XMM8); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM7); + x64_sse_addps_reg_reg(ins, X64_XMM10, X64_XMM9); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_R9, 0, X64_RAX, 2); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM11, X64_XMM11, 0xB1); + x64_sse_movaps_reg_reg(ins, X64_XMM1, X64_XMM4); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM6, X64_XMM6, 0xB1); + x64_sse_addps_reg_reg(ins, X64_XMM1, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_addps_reg_reg(ins, X64_XMM12, X64_XMM6); + x64_sse_subps_reg_reg(ins, X64_XMM2, X64_XMM6); + x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM13); + x64_sse_movaps_reg_reg(ins, X64_XMM14, X64_XMM4); + x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM1); + x64_sse_subps_reg_reg(ins, X64_XMM13, X64_XMM10); + x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM10); + + /* change sign */ + x64_sse_xorps_reg_reg(ins, X64_XMM13, X64_XMM3); + + x64_sse_addps_reg_reg(ins, X64_XMM4, X64_XMM11); + x64_sse_subps_reg_reg(ins, X64_XMM14, X64_XMM11); + x64_sse_shufps_reg_reg_imm(ins, X64_XMM13, X64_XMM13, 0xB1); + x64_sse_movaps_memindex_reg(ins, X64_RBX, 0, X64_RAX, 2, X64_XMM5); + x64_sse_movaps_memindex_reg(ins, X64_R9, 0, X64_RAX, 2, X64_XMM4); + x64_sse_movaps_memindex_reg(ins, X64_R10, 0, X64_RAX, 2, X64_XMM2); + x64_sse_subps_reg_reg(ins, X64_XMM1, X64_XMM13); + x64_sse_addps_reg_reg(ins, X64_XMM6, X64_XMM13); + x64_sse_movaps_memindex_reg(ins, X64_R11, 0, X64_RAX, 2, X64_XMM1); + x64_sse_movaps_memindex_reg(ins, X64_R12, 0, X64_RAX, 2, X64_XMM0); + x64_sse_movaps_memindex_reg(ins, X64_R13, 0, X64_RAX, 2, 
X64_XMM14); + x64_sse_movaps_memindex_reg(ins, X64_R14, 0, X64_RAX, 2, X64_XMM12); + x64_sse_movaps_memindex_reg(ins, X64_R15, 0, X64_RAX, 2, X64_XMM6); + x64_alu_reg_imm_size(ins, X86_ADD, X64_RAX, 4, 8); + + /* loop condition */ + x64_alu_reg_reg_size(ins, X86_CMP, X64_RCX, X64_RAX, 8); + x64_branch_size(ins, X86_CC_NE, x8_soft_loop, 0, 4); + x64_ret(ins); #endif - *fp = ins; + *fp = ins; return x8_addr; } -- cgit v1.1 From 83e81fda3974152ce5ef04a0a22f15e079cff394 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 18 Mar 2015 14:20:19 +0200 Subject: Remove unused sse.s --- CMakeLists.txt | 10 - src/sse.s | 885 --------------------------------------------------------- 2 files changed, 895 deletions(-) delete mode 100644 src/sse.s diff --git a/CMakeLists.txt b/CMakeLists.txt index 1393689..d83367e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -224,16 +224,6 @@ elseif(HAVE_XMMINTRIN_H) list(APPEND FFTS_SOURCES src/codegen_sse.h ) - - if(MSVC) - if(ENABLE_RUNTIME_DYNAMIC_CODE) - add_definitions(-DSSE_DEFINE_CONSTANTS) - endif(ENABLE_RUNTIME_DYNAMIC_CODE) - else() - list(APPEND FFTS_SOURCES - src/sse.s - ) - endif(MSVC) else() message(WARNING "Dynamic code is only supported with x64, disabling dynamic code.") set(DISABLE_DYNAMIC_CODE ON) diff --git a/src/sse.s b/src/sse.s deleted file mode 100644 index ccdebc8..0000000 --- a/src/sse.s +++ /dev/null @@ -1,885 +0,0 @@ -/* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -*/ - - .code64 - - .globl _neon_x4 - .align 4 -_neon_x4: - - .globl _neon_x8 - .align 4 -_neon_x8: - - .globl _neon_x8_t - .align 4 -_neon_x8_t: - - -#ifdef __APPLE__ - .globl _leaf_ee_init -_leaf_ee_init: -#else - .globl leaf_ee_init -leaf_ee_init: -#endif - #lea L_sse_constants(%rip), %r9 - movq (%rdi), %r8 - movq 0xe0(%rdi), %r9 - xorl %eax, %eax - -# eax is loop counter (init to 0) -# rcx is loop max count -# rsi is 'in' base pointer -# rdx is 'out' base pointer -# r8 is offsets pointer -# r9 is constants pointer -# scratch: rax r11 r12 -# .align 4, 0x90 - -# _leaf_ee + 9 needs 16 byte alignment -#ifdef __APPLE__ - .globl _leaf_ee -_leaf_ee: -#else - .globl leaf_ee -leaf_ee: -#endif - movaps 32(%r9), %xmm0 #83.5 - movaps (%r9), %xmm8 #83.5 -LEAF_EE_1: -LEAF_EE_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #83.5 -LEAF_EE_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #83.5 - movaps %xmm7, %xmm6 #83.5 -LEAF_EE_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 - movaps %xmm12, %xmm11 #83.5 - subps %xmm10, %xmm12 #83.5 - addps %xmm10, %xmm11 #83.5 - xorps %xmm8, %xmm12 #83.5 -LEAF_EE_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #83.5 -LEAF_EE_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #83.5 - addps %xmm9, %xmm6 #83.5 - subps %xmm9, %xmm7 #83.5 -LEAF_EE_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm13 #83.5 - movaps %xmm10, %xmm9 #83.5 -LEAF_EE_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm3 #83.5 - movaps %xmm6, %xmm5 #83.5 -LEAF_EE_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm14 #83.5 - movaps %xmm3, %xmm15 #83.5 - shufps $177, %xmm12, %xmm12 #83.5 - movaps %xmm7, %xmm4 #83.5 - movslq (%r8, %rax, 4), %r11 #83.44 - subps %xmm13, %xmm10 #83.5 - subps %xmm14, %xmm3 #83.5 - addps %xmm11, %xmm5 #83.5 - subps %xmm11, %xmm6 #83.5 - subps %xmm12, %xmm4 #83.5 - addps %xmm12, %xmm7 #83.5 - addps %xmm13, %xmm9 #83.5 - addps %xmm14, %xmm15 #83.5 - movaps 16(%r9), %xmm12 #83.5 - movaps %xmm9, %xmm1 #83.5 - movaps 16(%r9), %xmm11 #83.5 - movaps %xmm5, %xmm2 #83.5 - mulps %xmm10, %xmm12 #83.5 - subps %xmm15, %xmm9 #83.5 - addps %xmm15, %xmm1 #83.5 - mulps %xmm3, %xmm11 #83.5 - addps %xmm1, %xmm2 #83.5 - subps %xmm1, %xmm5 #83.5 - shufps $177, %xmm10, %xmm10 #83.5 - xorps %xmm8, %xmm9 #83.5 - shufps $177, %xmm3, %xmm3 #83.5 - movaps %xmm6, %xmm1 #83.5 - mulps %xmm0, %xmm10 #83.5 - movaps %xmm4, %xmm13 #83.5 - mulps %xmm0, %xmm3 #83.5 - subps %xmm10, %xmm12 #83.5 - addps %xmm3, %xmm11 #83.5 - movaps %xmm12, %xmm3 #83.5 - movaps %xmm7, %xmm14 #83.5 - shufps $177, %xmm9, %xmm9 #83.5 - subps %xmm11, %xmm12 #83.5 - addps %xmm11, %xmm3 #83.5 - subps %xmm9, %xmm1 #83.5 - addps %xmm9, %xmm6 #83.5 - addps %xmm3, %xmm4 #83.5 - subps %xmm3, %xmm13 #83.5 - xorps %xmm8, %xmm12 #83.5 - movaps %xmm2, %xmm3 #83.5 - shufps $177, %xmm12, %xmm12 #83.5 - movaps %xmm6, %xmm9 #83.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 - movlhps %xmm4, %xmm3 #83.5 - addq $4, %rax - shufps $238, %xmm4, %xmm2 #83.5 - movaps %xmm1, %xmm4 #83.5 - #movntdq %xmm3, (%rdx,%r11,4) #83.5 - subps %xmm12, %xmm7 #83.5 - addps %xmm12, %xmm14 #83.5 - movlhps %xmm7, %xmm4 #83.5 - shufps $238, %xmm7, %xmm1 #83.5 - movaps %xmm5, %xmm7 #83.5 - movlhps %xmm13, %xmm7 #83.5 - movlhps %xmm14, %xmm9 #83.5 - shufps $238, %xmm13, %xmm5 #83.5 - shufps $238, %xmm14, %xmm6 #83.5 - movaps %xmm3, (%rdx,%r11,4) #83.5 - movaps %xmm4, 16(%rdx,%r11,4) #83.5 - movaps %xmm7, 32(%rdx,%r11,4) #83.5 - movaps %xmm9, 48(%rdx,%r11,4) #83.5 - movaps %xmm2, (%rdx,%r12,4) #83.5 - movaps %xmm1, 16(%rdx,%r12,4) #83.5 - movaps %xmm5, 32(%rdx,%r12,4) #83.5 - movaps %xmm6, 48(%rdx,%r12,4) #83.5 
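# annotation (not part of the deleted file's own text): the 0xFECA
# displacements throughout this listing are placeholders. The code generator
# patched them with real displacements at plan time through the
# sse_leaf_*_offsets tables defined near the end of this file; each entry
# records where an instruction's 32-bit displacement field sits relative to
# the leaf's entry point (hence the +0x4/+0x5 adjustments past the opcode
# bytes).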
- cmpq %rcx, %rax - jne LEAF_EE_1 - -# _leaf_oo + 4 needs to be 16 byte aligned -#ifdef __APPLE__ - .globl _leaf_oo -_leaf_oo: -#else - .globl leaf_oo -leaf_oo: -#endif - movaps (%r9), %xmm5 #92.7 -LEAF_OO_1: -LEAF_OO_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm4 #93.5 - movaps %xmm4, %xmm6 #93.5 -LEAF_OO_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #93.5 -LEAF_OO_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm10 #93.5 - addps %xmm7, %xmm6 #93.5 - subps %xmm7, %xmm4 #93.5 -LEAF_OO_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm8 #93.5 - movaps %xmm10, %xmm9 #93.5 -LEAF_OO_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm1 #93.5 - movaps %xmm6, %xmm3 #93.5 -LEAF_OO_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm11 #93.5 - movaps %xmm1, %xmm2 #93.5 -LEAF_OO_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm14 #93.5 - movaps %xmm4, %xmm15 #93.5 -LEAF_OO_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #93.5 - movaps %xmm14, %xmm13 #93.5 - movslq (%r8, %rax, 4), %r11 #83.44 - subps %xmm8, %xmm10 #93.5 - addps %xmm8, %xmm9 #93.5 - addps %xmm11, %xmm2 #93.5 - subps %xmm12, %xmm14 #93.5 - subps %xmm11, %xmm1 #93.5 - addps %xmm12, %xmm13 #93.5 - addps %xmm9, %xmm3 #93.5 - subps %xmm9, %xmm6 #93.5 - xorps %xmm5, %xmm10 #93.5 - xorps %xmm5, %xmm14 #93.5 - shufps $177, %xmm10, %xmm10 #93.5 - movaps %xmm2, %xmm9 #93.5 - shufps $177, %xmm14, %xmm14 #93.5 - movaps %xmm6, %xmm7 #93.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 - addq $4, %rax #92.18 - addps %xmm10, %xmm4 #93.5 - addps %xmm13, %xmm9 #93.5 - subps %xmm13, %xmm2 #93.5 - subps %xmm10, %xmm15 #93.5 - movaps %xmm1, %xmm13 #93.5 - movaps %xmm2, %xmm8 #93.5 - movlhps %xmm4, %xmm7 #93.5 - subps %xmm14, %xmm13 #93.5 - addps %xmm14, %xmm1 #93.5 - shufps $238, %xmm4, %xmm6 #93.5 - movaps %xmm3, %xmm14 #93.5 - movaps %xmm9, %xmm4 #93.5 - movlhps %xmm15, %xmm14 #93.5 - movlhps %xmm13, %xmm4 #93.5 - movlhps %xmm1, %xmm8 #93.5 - shufps $238, %xmm15, %xmm3 #93.5 - shufps $238, %xmm13, %xmm9 #93.5 - shufps $238, %xmm1, %xmm2 #93.5 - movaps %xmm14, (%rdx,%r11,4) #93.5 - movaps %xmm7, 16(%rdx,%r11,4) #93.5 - movaps %xmm4, 32(%rdx,%r11,4) #93.5 - movaps %xmm8, 48(%rdx,%r11,4) #93.5 - movaps %xmm3, (%rdx,%r12,4) #93.5 - movaps %xmm6, 16(%rdx,%r12,4) #93.5 - movaps %xmm9, 32(%rdx,%r12,4) #93.5 - movaps %xmm2, 48(%rdx,%r12,4) #93.5 - cmpq %rcx, %rax - jne LEAF_OO_1 # Prob 95% #92.14 - -#ifdef __APPLE__ - .globl _leaf_eo -_leaf_eo: -#else - .globl leaf_eo -leaf_eo: -#endif -LEAF_EO_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #88.5 -LEAF_EO_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #88.5 - movaps %xmm9, %xmm11 #88.5 -LEAF_EO_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm5 #88.5 - movaps %xmm7, %xmm6 #88.5 -LEAF_EO_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm4 #88.5 - subps %xmm5, %xmm7 #88.5 - addps %xmm4, %xmm11 #88.5 - subps %xmm4, %xmm9 #88.5 - addps %xmm5, %xmm6 #88.5 - movaps (%r9), %xmm3 #88.5 - movaps %xmm11, %xmm10 #88.5 - xorps %xmm3, %xmm7 #88.5 - movaps %xmm9, %xmm8 #88.5 - shufps $177, %xmm7, %xmm7 #88.5 - addps %xmm6, %xmm10 #88.5 - subps %xmm6, %xmm11 #88.5 - subps %xmm7, %xmm8 #88.5 - addps %xmm7, %xmm9 #88.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 - movaps %xmm10, %xmm2 #88.5 - movslq (%r8, %rax, 4), %r11 #83.44 - movaps %xmm11, %xmm1 #88.5 - shufps $238, %xmm8, %xmm10 #88.5 - shufps $238, %xmm9, %xmm11 #88.5 - movaps %xmm10, (%rdx,%r12,4) #88.5 - movaps %xmm11, 16(%rdx,%r12,4) #88.5 -LEAF_EO_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm15 #88.5 -LEAF_EO_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #88.5 - movaps %xmm15, %xmm14 #88.5 -LEAF_EO_const_6: - movaps 0xFECA(%rsi,%rax,4), 
%xmm4 #88.5 - addps %xmm12, %xmm14 #88.5 - subps %xmm12, %xmm15 #88.5 -LEAF_EO_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm13 #88.5 - movaps %xmm4, %xmm5 #88.5 - movaps %xmm14, %xmm7 #88.5 - addps %xmm13, %xmm5 #88.5 - subps %xmm13, %xmm4 #88.5 - movlhps %xmm8, %xmm2 #88.5 - movaps %xmm5, %xmm8 #88.5 - movlhps %xmm15, %xmm7 #88.5 - xorps %xmm3, %xmm15 #88.5 - movaps %xmm5, %xmm6 #88.5 - subps %xmm14, %xmm5 #88.5 - addps %xmm14, %xmm6 #88.5 - movlhps %xmm9, %xmm1 #88.5 - movaps %xmm4, %xmm14 #88.5 - movlhps %xmm4, %xmm8 #88.5 - movaps %xmm1, %xmm12 #88.5 - shufps $177, %xmm15, %xmm15 #88.5 - movaps 0x30(%r9), %xmm11 #88.5 - addq $4, %rax #90.5 - subps %xmm15, %xmm14 #88.5 - mulps %xmm7, %xmm11 #88.5 - addps %xmm15, %xmm4 #88.5 - movaps 0x30(%r9), %xmm9 #88.5 - movaps 0x40(%r9), %xmm15 #88.5 - shufps $177, %xmm7, %xmm7 #88.5 - mulps %xmm8, %xmm9 #88.5 - mulps %xmm15, %xmm7 #88.5 - shufps $177, %xmm8, %xmm8 #88.5 - subps %xmm7, %xmm11 #88.5 - mulps %xmm15, %xmm8 #88.5 - movaps %xmm11, %xmm10 #88.5 - addps %xmm8, %xmm9 #88.5 - shufps $238, %xmm14, %xmm6 #88.5 - subps %xmm9, %xmm11 #88.5 - addps %xmm9, %xmm10 #88.5 - xorps %xmm3, %xmm11 #88.5 - movaps %xmm2, %xmm3 #88.5 - shufps $177, %xmm11, %xmm11 #88.5 - subps %xmm10, %xmm3 #88.5 - addps %xmm10, %xmm2 #88.5 - addps %xmm11, %xmm12 #88.5 - subps %xmm11, %xmm1 #88.5 - shufps $238, %xmm4, %xmm5 #88.5 - movaps %xmm5, 48(%rdx,%r12,4) #88.5 - movaps %xmm6, 32(%rdx,%r12,4) #88.5 - movaps %xmm2, (%rdx,%r11,4) #88.5 - movaps %xmm1, 16(%rdx,%r11,4) #88.5 - movaps %xmm3, 32(%rdx,%r11,4) #88.5 - movaps %xmm12, 48(%rdx,%r11,4) #88.5 - -#ifdef __APPLE__ - .globl _leaf_oe -_leaf_oe: -#else - .globl leaf_oe -leaf_oe: -#endif - movaps (%r9), %xmm0 #59.5 - #movaps 0x20(%r9), %xmm1 #59.5 -LEAF_OE_const_2: - movaps 0xFECA(%rsi,%rax,4), %xmm6 #70.5 -LEAF_OE_const_3: - movaps 0xFECA(%rsi,%rax,4), %xmm8 #70.5 - movaps %xmm6, %xmm10 #70.5 - shufps $228, %xmm8, %xmm10 #70.5 - movaps %xmm10, %xmm9 #70.5 - shufps $228, %xmm6, %xmm8 #70.5 -LEAF_OE_const_0: - movaps 0xFECA(%rsi,%rax,4), %xmm12 #70.5 -LEAF_OE_const_1: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 - movaps %xmm12, %xmm14 #70.5 - movslq (%r8, %rax, 4), %r11 #83.44 - addps %xmm8, %xmm9 #70.5 - subps %xmm8, %xmm10 #70.5 - addps %xmm7, %xmm14 #70.5 - subps %xmm7, %xmm12 #70.5 - movaps %xmm9, %xmm4 #70.5 - movaps %xmm14, %xmm13 #70.5 - shufps $238, %xmm10, %xmm4 #70.5 - xorps %xmm0, %xmm10 #70.5 - shufps $177, %xmm10, %xmm10 #70.5 - movaps %xmm12, %xmm11 #70.5 - movaps %xmm14, %xmm5 #70.5 - addps %xmm9, %xmm13 #70.5 - subps %xmm10, %xmm11 #70.5 - subps %xmm9, %xmm14 #70.5 - shufps $238, %xmm12, %xmm5 #70.5 - addps %xmm10, %xmm12 #70.5 - movslq 8(%r8, %rax, 4), %r12 #83.59 - movlhps %xmm11, %xmm13 #70.5 - movaps %xmm13, (%rdx,%r11,4) #70.5 - movaps 0x30(%r9), %xmm13 #70.5 - movlhps %xmm12, %xmm14 #70.5 - movaps 0x40(%r9), %xmm12 #70.5 - mulps %xmm5, %xmm13 #70.5 - shufps $177, %xmm5, %xmm5 #70.5 - mulps %xmm12, %xmm5 #70.5 - movaps %xmm14, 16(%rdx,%r11,4) #70.5 - subps %xmm5, %xmm13 #70.5 - movaps 0x30(%r9), %xmm5 #70.5 - mulps %xmm4, %xmm5 #70.5 - shufps $177, %xmm4, %xmm4 #70.5 - mulps %xmm12, %xmm4 #70.5 -LEAF_OE_const_4: - movaps 0xFECA(%rsi,%rax,4), %xmm9 #70.5 - addps %xmm4, %xmm5 #70.5 -LEAF_OE_const_6: - movaps 0xFECA(%rsi,%rax,4), %xmm7 #70.5 - movaps %xmm9, %xmm3 #70.5 -LEAF_OE_const_7: - movaps 0xFECA(%rsi,%rax,4), %xmm2 #70.5 - movaps %xmm7, %xmm6 #70.5 -LEAF_OE_const_5: - movaps 0xFECA(%rsi,%rax,4), %xmm15 #70.5 - movaps %xmm13, %xmm4 #70.5 - subps %xmm2, %xmm7 #70.5 - addps %xmm15, %xmm3 #70.5 - subps 
%xmm15, %xmm9 #70.5 - addps %xmm2, %xmm6 #70.5 - subps %xmm5, %xmm13 #70.5 - addps %xmm5, %xmm4 #70.5 - xorps %xmm0, %xmm7 #70.5 - addq $4, %rax #72.5 - movaps %xmm3, %xmm2 #70.5 - shufps $177, %xmm7, %xmm7 #70.5 - movaps %xmm9, %xmm8 #70.5 - xorps %xmm0, %xmm13 #70.5 - addps %xmm6, %xmm2 #70.5 - subps %xmm7, %xmm8 #70.5 - subps %xmm6, %xmm3 #70.5 - addps %xmm7, %xmm9 #70.5 - movaps %xmm2, %xmm10 #70.5 - movaps %xmm3, %xmm11 #70.5 - shufps $238, %xmm8, %xmm2 #70.5 - shufps $238, %xmm9, %xmm3 #70.5 - movaps %xmm2, %xmm14 #70.5 - shufps $177, %xmm13, %xmm13 #70.5 - subps %xmm4, %xmm14 #70.5 - addps %xmm4, %xmm2 #70.5 - movaps %xmm3, %xmm4 #70.5 - subps %xmm13, %xmm3 #70.5 - addps %xmm13, %xmm4 #70.5 - movlhps %xmm8, %xmm10 #70.5 - movlhps %xmm9, %xmm11 #70.5 - movaps %xmm10, 32(%rdx,%r11,4) #70.5 - movaps %xmm11, 48(%rdx,%r11,4) #70.5 - movaps %xmm2, (%rdx,%r12,4) #70.5 - movaps %xmm3, 16(%rdx,%r12,4) #70.5 - movaps %xmm14, 32(%rdx,%r12,4) #70.5 - movaps %xmm4, 48(%rdx,%r12,4) #70.5 - -#ifdef __APPLE__ - .globl _leaf_end -_leaf_end: -#else - .globl leaf_end -leaf_end: -#endif - -#ifdef __APPLE__ - .globl _x_init -_x_init: -#else - .globl x_init -x_init: -#endif - #movaps L_sse_constants(%rip), %xmm3 #34.3 - movaps (%r9), %xmm3 #34.3 - movq 0x20(%rdi), %r8 -#ifdef __APPLE__ - .globl _x4 -_x4: -#else - .globl x4 -x4: -#endif - movaps 64(%rdx), %xmm0 #34.3 - movaps 96(%rdx), %xmm1 #34.3 - movaps (%rdx), %xmm7 #34.3 - movaps (%r8), %xmm4 #const - movaps %xmm7, %xmm9 #34.3 - movaps %xmm4, %xmm6 #34.3 - movaps 16(%r8), %xmm2 #const - mulps %xmm0, %xmm6 #34.3 - mulps %xmm1, %xmm4 #34.3 - shufps $177, %xmm0, %xmm0 #34.3 - shufps $177, %xmm1, %xmm1 #34.3 - mulps %xmm2, %xmm0 #34.3 - mulps %xmm1, %xmm2 #34.3 - subps %xmm0, %xmm6 #34.3 - addps %xmm2, %xmm4 #34.3 - movaps %xmm6, %xmm5 #34.3 - subps %xmm4, %xmm6 #34.3 - addps %xmm4, %xmm5 #34.3 - movaps 32(%rdx), %xmm8 #34.3 - xorps %xmm3, %xmm6 #34.3 - shufps $177, %xmm6, %xmm6 #34.3 - movaps %xmm8, %xmm10 #34.3 - movaps 112(%rdx), %xmm12 #34.3 - subps %xmm5, %xmm9 #34.3 - addps %xmm5, %xmm7 #34.3 - addps %xmm6, %xmm10 #34.3 - subps %xmm6, %xmm8 #34.3 - movaps %xmm7, (%rdx) #34.3 - movaps %xmm8, 32(%rdx) #34.3 - movaps %xmm9, 64(%rdx) #34.3 - movaps %xmm10, 96(%rdx) #34.3 - movaps 32(%r8), %xmm14 #const #34.3 - movaps 80(%rdx), %xmm11 #34.3 - movaps %xmm14, %xmm0 #34.3 - movaps 48(%r8), %xmm13 #const #34.3 - mulps %xmm11, %xmm0 #34.3 - mulps %xmm12, %xmm14 #34.3 - shufps $177, %xmm11, %xmm11 #34.3 - shufps $177, %xmm12, %xmm12 #34.3 - mulps %xmm13, %xmm11 #34.3 - mulps %xmm12, %xmm13 #34.3 - subps %xmm11, %xmm0 #34.3 - addps %xmm13, %xmm14 #34.3 - movaps %xmm0, %xmm15 #34.3 - subps %xmm14, %xmm0 #34.3 - addps %xmm14, %xmm15 #34.3 - xorps %xmm3, %xmm0 #34.3 - movaps 16(%rdx), %xmm1 #34.3 - movaps 48(%rdx), %xmm2 #34.3 - movaps %xmm1, %xmm4 #34.3 - shufps $177, %xmm0, %xmm0 #34.3 - movaps %xmm2, %xmm5 #34.3 - addps %xmm15, %xmm1 #34.3 - subps %xmm0, %xmm2 #34.3 - subps %xmm15, %xmm4 #34.3 - addps %xmm0, %xmm5 #34.3 - movaps %xmm1, 16(%rdx) #34.3 - movaps %xmm2, 48(%rdx) #34.3 - movaps %xmm4, 80(%rdx) #34.3 - movaps %xmm5, 112(%rdx) #34.3 - ret - -# _x8_soft + 5 needs to be 16 byte aligned -#ifdef __APPLE__ - .globl _x8_soft -_x8_soft: -#else - .globl x8_soft -x8_soft: -#endif - xorl %eax, %eax - movq %rdx, %rbx - movq %r8, %rsi - leaq (%rdx,%rcx,4), %r9 - leaq (%r9,%rcx,4), %r10 - leaq (%r10,%rcx,4), %r11 - leaq (%r11,%rcx,4), %r12 - leaq (%r12,%rcx,4), %r13 - leaq (%r13,%rcx,4), %r14 - leaq (%r14,%rcx,4), %r15 -X8_soft_loop: - movaps (%rsi), %xmm9 - 
movaps (%r10,%rax,4), %xmm6 - movaps %xmm9, %xmm11 - movaps (%r11,%rax,4), %xmm7 - movaps 16(%rsi), %xmm8 - mulps %xmm6, %xmm11 - mulps %xmm7, %xmm9 - shufps $177, %xmm6, %xmm6 - mulps %xmm8, %xmm6 - shufps $177, %xmm7, %xmm7 - subps %xmm6, %xmm11 - mulps %xmm7, %xmm8 - movaps %xmm11, %xmm10 - addps %xmm8, %xmm9 - movaps 32(%rsi), %xmm15 - addps %xmm9, %xmm10 - subps %xmm9, %xmm11 - movaps (%rbx,%rax,4), %xmm5 - movaps %xmm15, %xmm6 - movaps (%r12,%rax,4), %xmm12 - movaps %xmm5, %xmm2 - movaps (%r14,%rax,4), %xmm13 - xorps %xmm3, %xmm11 #const - movaps 48(%rsi), %xmm14 - subps %xmm10, %xmm2 - mulps %xmm12, %xmm6 - addps %xmm10, %xmm5 - mulps %xmm13, %xmm15 - movaps 64(%rsi), %xmm10 - movaps %xmm5, %xmm0 - shufps $177, %xmm12, %xmm12 - shufps $177, %xmm13, %xmm13 - mulps %xmm14, %xmm12 - mulps %xmm13, %xmm14 - subps %xmm12, %xmm6 - addps %xmm14, %xmm15 - movaps (%r13,%rax,4), %xmm7 - movaps %xmm10, %xmm13 - movaps (%r15,%rax,4), %xmm8 - movaps %xmm6, %xmm12 - movaps 80(%rsi), %xmm9 - addq $96, %rsi - mulps %xmm7, %xmm13 - subps %xmm15, %xmm6 - addps %xmm15, %xmm12 - mulps %xmm8, %xmm10 - subps %xmm12, %xmm0 - addps %xmm12, %xmm5 - shufps $177, %xmm7, %xmm7 - xorps %xmm3, %xmm6 #const - shufps $177, %xmm8, %xmm8 - movaps %xmm2, %xmm12 - mulps %xmm9, %xmm7 - mulps %xmm8, %xmm9 - subps %xmm7, %xmm13 - addps %xmm9, %xmm10 - movaps (%r9,%rax,4), %xmm4 - shufps $177, %xmm11, %xmm11 - movaps %xmm4, %xmm1 - shufps $177, %xmm6, %xmm6 - addps %xmm11, %xmm1 - subps %xmm11, %xmm4 - addps %xmm6, %xmm12 - subps %xmm6, %xmm2 - movaps %xmm13, %xmm11 - movaps %xmm4, %xmm14 - movaps %xmm1, %xmm6 - subps %xmm10, %xmm13 - addps %xmm10, %xmm11 - xorps %xmm3, %xmm13 #const - addps %xmm11, %xmm4 - subps %xmm11, %xmm14 - shufps $177, %xmm13, %xmm13 - movaps %xmm5, (%rbx,%rax,4) - movaps %xmm4, (%r9,%rax,4) - movaps %xmm2, (%r10,%rax,4) - subps %xmm13, %xmm1 - addps %xmm13, %xmm6 - movaps %xmm1, (%r11,%rax,4) - movaps %xmm0, (%r12,%rax,4) - movaps %xmm14, (%r13,%rax,4) - movaps %xmm12, (%r14,%rax,4) - movaps %xmm6, (%r15,%rax,4) - addq $4, %rax - cmpq %rcx, %rax - jne X8_soft_loop - ret - -#ifdef __APPLE__ - .globl _x8_soft_end -_x8_soft_end: -#else - .globl x8_soft_end -x8_soft_end: -#endif - -#ifdef __APPLE__ - .globl _x8_hard -_x8_hard: -#else - .globl x8_hard -x8_hard: -#endif - movaps (%r9), %xmm5 -X8_loop: - movaps (%r8), %xmm9 -X8_const_2: - movaps 0xFECA(%rdx,%rax,4), %xmm6 - movaps %xmm9, %xmm11 -X8_const_3: - movaps 0xFECA(%rdx,%rax,4), %xmm7 - movaps 16(%r8), %xmm8 - mulps %xmm6, %xmm11 - mulps %xmm7, %xmm9 - shufps $177, %xmm6, %xmm6 - mulps %xmm8, %xmm6 - shufps $177, %xmm7, %xmm7 - subps %xmm6, %xmm11 - mulps %xmm7, %xmm8 - movaps %xmm11, %xmm10 - addps %xmm8, %xmm9 - movaps 32(%r8), %xmm15 - addps %xmm9, %xmm10 - subps %xmm9, %xmm11 -X8_const_0: - movaps 0xFECA(%rdx,%rax,4), %xmm3 - movaps %xmm15, %xmm6 -X8_const_4: - movaps 0xFECA(%rdx,%rax,4), %xmm12 - movaps %xmm3, %xmm2 -X8_const_6: - movaps 0xFECA(%rdx,%rax,4), %xmm13 - xorps %xmm5, %xmm11 - movaps 48(%r8), %xmm14 - subps %xmm10, %xmm2 - mulps %xmm12, %xmm6 - addps %xmm10, %xmm3 - mulps %xmm13, %xmm15 - movaps 64(%r8), %xmm10 - movaps %xmm3, %xmm0 - shufps $177, %xmm12, %xmm12 - shufps $177, %xmm13, %xmm13 - mulps %xmm14, %xmm12 - mulps %xmm13, %xmm14 - subps %xmm12, %xmm6 - addps %xmm14, %xmm15 -X8_const_5: - movaps 0xFECA(%rdx,%rax,4), %xmm7 - movaps %xmm10, %xmm13 -X8_const_7: - movaps 0xFECA(%rdx,%rax,4), %xmm8 - movaps %xmm6, %xmm12 - movaps 80(%r8), %xmm9 - addq $96, %r8 - mulps %xmm7, %xmm13 - subps %xmm15, %xmm6 - addps %xmm15, %xmm12 - 
mulps %xmm8, %xmm10 - subps %xmm12, %xmm0 - addps %xmm12, %xmm3 - shufps $177, %xmm7, %xmm7 - xorps %xmm5, %xmm6 - shufps $177, %xmm8, %xmm8 - movaps %xmm2, %xmm12 - mulps %xmm9, %xmm7 - mulps %xmm8, %xmm9 - subps %xmm7, %xmm13 - addps %xmm9, %xmm10 -X8_const_1: - movaps 0xFECA(%rdx,%rax,4), %xmm4 - shufps $177, %xmm11, %xmm11 - movaps %xmm4, %xmm1 - shufps $177, %xmm6, %xmm6 - addps %xmm11, %xmm1 - subps %xmm11, %xmm4 - addps %xmm6, %xmm12 - subps %xmm6, %xmm2 - movaps %xmm13, %xmm11 - movaps %xmm4, %xmm14 - movaps %xmm1, %xmm6 - subps %xmm10, %xmm13 - addps %xmm10, %xmm11 - xorps %xmm5, %xmm13 - addps %xmm11, %xmm4 - subps %xmm11, %xmm14 - shufps $177, %xmm13, %xmm13 -X8_const1_0: - movaps %xmm3, 0xFECA(%rdx,%rax,4) -X8_const1_1: - movaps %xmm4, 0xFECA(%rdx,%rax,4) -X8_const1_2: - movaps %xmm2, 0xFECA(%rdx,%rax,4) - subps %xmm13, %xmm1 - addps %xmm13, %xmm6 -X8_const1_3: - movaps %xmm1, 0xFECA(%rdx,%rax,4) -X8_const1_4: - movaps %xmm0, 0xFECA(%rdx,%rax,4) -X8_const1_5: - movaps %xmm14, 0xFECA(%rdx,%rax,4) -X8_const1_6: - movaps %xmm12, 0xFECA(%rdx,%rax,4) -X8_const1_7: - movaps %xmm6, 0xFECA(%rdx,%rax,4) - addq $4, %rax - cmpq %rcx, %rax - jne X8_loop - -#ifdef __APPLE__ - .globl _sse_leaf_ee_offsets - .globl _sse_leaf_oo_offsets - .globl _sse_leaf_eo_offsets - .globl _sse_leaf_oe_offsets - .align 4 -_sse_leaf_ee_offsets: - .long LEAF_EE_const_0-_leaf_ee+0x4 - .long LEAF_EE_const_1-_leaf_ee+0x5 - .long LEAF_EE_const_2-_leaf_ee+0x5 - .long LEAF_EE_const_3-_leaf_ee+0x5 - .long LEAF_EE_const_4-_leaf_ee+0x5 - .long LEAF_EE_const_5-_leaf_ee+0x5 - .long LEAF_EE_const_6-_leaf_ee+0x4 - .long LEAF_EE_const_7-_leaf_ee+0x5 -_sse_leaf_oo_offsets: - .long LEAF_OO_const_0-_leaf_oo+0x4 - .long LEAF_OO_const_1-_leaf_oo+0x4 - .long LEAF_OO_const_2-_leaf_oo+0x5 - .long LEAF_OO_const_3-_leaf_oo+0x5 - .long LEAF_OO_const_4-_leaf_oo+0x4 - .long LEAF_OO_const_5-_leaf_oo+0x5 - .long LEAF_OO_const_6-_leaf_oo+0x5 - .long LEAF_OO_const_7-_leaf_oo+0x5 -_sse_leaf_eo_offsets: - .long LEAF_EO_const_0-_leaf_eo+0x5 - .long LEAF_EO_const_1-_leaf_eo+0x4 - .long LEAF_EO_const_2-_leaf_eo+0x4 - .long LEAF_EO_const_3-_leaf_eo+0x4 - .long LEAF_EO_const_4-_leaf_eo+0x5 - .long LEAF_EO_const_5-_leaf_eo+0x5 - .long LEAF_EO_const_6-_leaf_eo+0x4 - .long LEAF_EO_const_7-_leaf_eo+0x5 -_sse_leaf_oe_offsets: - .long LEAF_OE_const_0-_leaf_oe+0x5 - .long LEAF_OE_const_1-_leaf_oe+0x4 - .long LEAF_OE_const_2-_leaf_oe+0x4 - .long LEAF_OE_const_3-_leaf_oe+0x5 - .long LEAF_OE_const_4-_leaf_oe+0x5 - .long LEAF_OE_const_5-_leaf_oe+0x5 - .long LEAF_OE_const_6-_leaf_oe+0x4 - .long LEAF_OE_const_7-_leaf_oe+0x4 -#else - .globl sse_leaf_ee_offsets - .globl sse_leaf_oo_offsets - .globl sse_leaf_eo_offsets - .globl sse_leaf_oe_offsets - .align 4 -sse_leaf_ee_offsets: - .long LEAF_EE_const_0-leaf_ee+0x4 - .long LEAF_EE_const_1-leaf_ee+0x5 - .long LEAF_EE_const_2-leaf_ee+0x5 - .long LEAF_EE_const_3-leaf_ee+0x5 - .long LEAF_EE_const_4-leaf_ee+0x5 - .long LEAF_EE_const_5-leaf_ee+0x5 - .long LEAF_EE_const_6-leaf_ee+0x4 - .long LEAF_EE_const_7-leaf_ee+0x5 -sse_leaf_oo_offsets: - .long LEAF_OO_const_0-leaf_oo+0x4 - .long LEAF_OO_const_1-leaf_oo+0x4 - .long LEAF_OO_const_2-leaf_oo+0x5 - .long LEAF_OO_const_3-leaf_oo+0x5 - .long LEAF_OO_const_4-leaf_oo+0x4 - .long LEAF_OO_const_5-leaf_oo+0x5 - .long LEAF_OO_const_6-leaf_oo+0x5 - .long LEAF_OO_const_7-leaf_oo+0x5 -sse_leaf_eo_offsets: - .long LEAF_EO_const_0-leaf_eo+0x5 - .long LEAF_EO_const_1-leaf_eo+0x4 - .long LEAF_EO_const_2-leaf_eo+0x4 - .long LEAF_EO_const_3-leaf_eo+0x4 - .long LEAF_EO_const_4-leaf_eo+0x5 
- .long LEAF_EO_const_5-leaf_eo+0x5 - .long LEAF_EO_const_6-leaf_eo+0x4 - .long LEAF_EO_const_7-leaf_eo+0x5 -sse_leaf_oe_offsets: - .long LEAF_OE_const_0-leaf_oe+0x5 - .long LEAF_OE_const_1-leaf_oe+0x4 - .long LEAF_OE_const_2-leaf_oe+0x4 - .long LEAF_OE_const_3-leaf_oe+0x5 - .long LEAF_OE_const_4-leaf_oe+0x5 - .long LEAF_OE_const_5-leaf_oe+0x5 - .long LEAF_OE_const_6-leaf_oe+0x4 - .long LEAF_OE_const_7-leaf_oe+0x4 -#endif - -#ifdef __APPLE__ - .data -#else - .section .data -#endif - .p2align 4 -#ifdef __APPLE__ - .globl _sse_constants -_sse_constants: -#else - .globl sse_constants -sse_constants: -#endif - .long 0x00000000,0x80000000,0x00000000,0x80000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0xbf3504f3,0x3f3504f3,0xbf3504f3,0x3f3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0xbf3504f3,0x3f3504f3 -#ifdef __APPLE__ - .globl _sse_constants_inv -_sse_constants_inv: -#else - .globl sse_constants_inv -sse_constants_inv: -#endif - .long 0x80000000,0x00000000,0x80000000,0x00000000 - .long 0x3f3504f3,0x3f3504f3,0x3f3504f3,0x3f3504f3 - .long 0x3f3504f3,0xbf3504f3,0x3f3504f3,0xbf3504f3 - .long 0x3f800000,0x3f800000,0x3f3504f3,0x3f3504f3 - .long 0x00000000,0x00000000,0x3f3504f3,0xbf3504f3 -- cgit v1.1 From d8cbe74acac7a867777f407309421b8065705ebf Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 18 Mar 2015 18:24:11 +0200 Subject: Minimize sin/cos calculations by calculating all factors once and generating lookup tables by mapping --- src/codegen_sse.h | 9 ++++----- src/ffts.c | 39 +++++++++++++++++++++++++++------------ 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/src/codegen_sse.h b/src/codegen_sse.h index c518481..c0a34fe 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -38,7 +38,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "arch/x64/x64-codegen.h" #include -#include static const FFTS_ALIGN(16) unsigned int sse_constants[20] = { /* 0.0, -0.0, 0.0, -0.0 */ @@ -741,12 +740,12 @@ generate_leaf_eo(insns_t **fp, uint32_t *offsets) insns_t *ins = *fp; #ifdef _M_X64 - x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2); - x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM9, X64_RDX, offsets[0], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM7, X64_RDX, offsets[2], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM11, X64_XMM9); - x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM5, X64_RDX, offsets[3], X64_RAX, 2); x64_sse_movaps_reg_reg(ins, X64_XMM6, X64_XMM7); - x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2); + x64_sse_movaps_reg_memindex(ins, X64_XMM4, X64_RDX, offsets[1], X64_RAX, 2); x64_sse_subps_reg_reg(ins, X64_XMM7, X64_XMM5); x64_sse_addps_reg_reg(ins, X64_XMM11, X64_XMM4); x64_sse_subps_reg_reg(ins, X64_XMM9, X64_XMM4); diff --git a/src/ffts.c b/src/ffts.c index 2b6b647..41df886 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -205,7 +205,9 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) V4SF MULI_SIGN; size_t n_luts; ffts_cpx_32f *w; - size_t i, n; + ffts_cpx_32f *tmp; + size_t i, j, m, n; + int stride; if (sign < 0) { MULI_SIGN = V4SF_LIT4(-0.0f, 0.0f, -0.0f, 0.0f); @@ -246,19 +248,28 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) V4SF neg = (sign < 0) ?
V4SF_LIT4(0.0f, 0.0f, 0.0f, 0.0f) : V4SF_LIT4(-0.0f, -0.0f, -0.0f, -0.0f); #endif + /* calculate factors */ + m = leaf_N << (n_luts - 2); + tmp = FFTS_MALLOC(m * sizeof(ffts_cpx_32f), 32); + + for (i = 0; i < m; i++) { + tmp[i][0] = W_re(4*m, i); + tmp[i][1] = W_im(4*m, i); + } + + /* generate lookup tables */ + stride = 1 << (n_luts - 1); for (i = 0; i < n_luts; i++) { p->ws_is[i] = w - (ffts_cpx_32f*) p->ws; - //fprintf(stderr, "LUT[%zu] = %d @ %08x - %zu\n", i, n, w, p->ws_is[i]); if (!i) { ffts_cpx_32f *w0 = FFTS_MALLOC(n/4 * sizeof(ffts_cpx_32f), 32); float *fw0 = (float*) w0; float *fw = (float*) w; - size_t j; for (j = 0; j < n/4; j++) { - w0[j][0] = W_re(n, j); - w0[j][1] = W_im(n, j); + w0[j][0] = tmp[j * stride][0]; + w0[j][1] = tmp[j * stride][1]; } #if defined(__arm__) && !defined(DYNAMIC_DISABLED) @@ -301,14 +312,15 @@ float *fw = (float *)w; - size_t j; for (j = 0; j < n/8; j++) { - w0[j][0] = W_re((float) n, (float) 2*j); - w0[j][1] = W_im((float) n, (float) 2*j); - w1[j][0] = W_re((float) n, (float) j); - w1[j][1] = W_im((float) n, (float) j); - w2[j][0] = W_re((float) n, (float) (j + (n/8))); - w2[j][1] = W_im((float) n, (float) (j + (n/8))); + w0[j][0] = tmp[2 * j * stride][0]; + w0[j][1] = tmp[2 * j * stride][1]; + + w1[j][0] = tmp[j * stride][0]; + w1[j][1] = tmp[j * stride][1]; + + w2[j][0] = tmp[(j + (n/8)) * stride][0]; + w2[j][1] = tmp[(j + (n/8)) * stride][1]; } #if defined(__arm__) && !defined(DYNAMIC_DISABLED) @@ -374,6 +386,7 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) } n *= 2; + stride >>= 1; } #if defined(__arm__) && !defined(DYNAMIC_DISABLED) @@ -388,6 +401,8 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) } #endif + FFTS_FREE(tmp); + p->lastlut = w; p->n_luts = n_luts; return 0; -- cgit v1.1 From 2014019a6760e712a019ba639e76aaa8729d8843 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 19 Mar 2015 13:38:40 +0200 Subject: To support building for Windows with MinGW, don't assume MSVC to be the compiler --- src/ffts_internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 3e788f8..f992811 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -184,7 +184,7 @@ struct _ffts_plan_t { static FFTS_INLINE void *ffts_aligned_malloc(size_t size) { -#if defined(_MSC_VER) +#if defined(_WIN32) return _aligned_malloc(size, 32); #else return valloc(size); -- cgit v1.1 From 93a58ef7e0b973411bbed3e07750c7d1fc1e40d5 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 19 Mar 2015 13:51:05 +0200 Subject: ffts_nd.c is using SSE2 intrinsics, detect and include emmintrin.h instead of xmmintrin.h, and fix GCC error: inlining failed in call to always_inline '_mm_load_pd': target specific option mismatch by adding "-msse2" instead of "-msse" --- CMakeLists.txt | 12 ++++++++++-- src/ffts_nd.c | 4 ++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d83367e..5a1a897 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -136,6 +136,12 @@ if(NOT CMAKE_CROSSCOMPILING) if(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) endif(HAVE_XMMINTRIN_H) + + # check if the platform has support for SSE2 SIMD extension + check_include_file(emmintrin.h HAVE_EMMINTRIN_H) + if(HAVE_EMMINTRIN_H) + add_definitions(-DHAVE_SSE2) + endif(HAVE_EMMINTRIN_H) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") else() # Check if we can always use detection code above?
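As an aside on how the HAVE_SSE2 definition added above is consumed: the ffts_nd.c hunk below switches its include to match. A minimal sketch of the guard pattern follows -- the header and macro names come from this commit's message, the comments are illustrative, and this is not FFTS source verbatim:

#if defined(HAVE_SSE2)
#include <emmintrin.h>  /* SSE2 intrinsics, e.g. __m128d, _mm_load_pd */
#elif defined(HAVE_SSE)
#include <xmmintrin.h>  /* SSE-only intrinsics, e.g. __m128, _mm_load_ps */
#endif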
@@ -160,9 +166,11 @@ elseif(CMAKE_COMPILER_IS_GNUCC) list(APPEND FFTS_EXTRA_LIBRARIES m) endif(HAVE_LIBM) - if(HAVE_XMMINTRIN_H) + if(HAVE_EMMINTRIN_H) + add_definitions(-msse2) + elseif(HAVE_XMMINTRIN_H) add_definitions(-msse) - endif(HAVE_XMMINTRIN_H) + endif(HAVE_EMMINTRIN_H) endif(MSVC) include_directories(include) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 839e35b..23338c1 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -37,8 +37,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_NEON #include "neon.h" #include -#elif HAVE_SSE -#include +#elif HAVE_SSE2 +#include #endif #define TSIZE 8 -- cgit v1.1 From dfab21f8096660f441fb33bf5012e7f2c3652fa9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 31 Mar 2015 16:47:06 +0300 Subject: Generate cosine and sine table without using C math library. About 100 times faster on ARM and 15 times faster on x86. --- src/ffts.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++----- src/ffts_internal.h | 10 --------- 2 files changed, 54 insertions(+), 15 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index 41df886..f9cb9bb 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -199,6 +199,58 @@ void ffts_free_1d(ffts_plan_t *p) free(p); } +static void +ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size) +{ + double alpha, beta; + double c[2], s[2]; + int i; + + double x = 1.0 / table_size; + double z = x * x; + + /* polynomial approximations calculated using Sollya */ + + /* alpha = 2 * sin(M_PI_4 / m) * sin(M_PI_4 / m) */ + alpha = x * (1.1107207345394952717884501203293686870741139540138 + + z * (-0.114191397993514079911985272577099412137126013186879 + + z * 3.52164670852685621720746817665316575239342815885835e-3)); + alpha = alpha * alpha; + + /* beta = sin(M_PI_2 / m) */ + beta = x * (1.57079632679489455959753740899031981825828552246094 + + z * (-0.64596409735041482313988581154262647032737731933593 + + z * 7.9690915468332887416913479228242067620158195495605e-2)); + + /* cos(0) = 1.0, sin(0) = 0.0 */ + c[0] = 1.0; + s[0] = 0.0; + + table[ 0][0] = 1.0f; + table[ 0][1] = 0.0f; + table[table_size - 1][1] = -1.0f; + table[table_size - 1][0] = -0.0f; + + /* generate sine and cosine table with maximum error less than 1 ULP */ + for (i = 1; i < table_size/2; i += 2) { + c[1] = c[0] - ((alpha * c[0]) + (beta * s[0])); + s[1] = s[0] - ((alpha * s[0]) - (beta * c[0])); + + table[i + 0][0] = (float) c[1]; + table[i + 0][1] = (float) -s[1]; + table[table_size - i][0] = (float) s[1]; + table[table_size - i][1] = (float) -c[1]; + + c[0] = c[1] - ((alpha * c[1]) + (beta * s[1])); + s[0] = s[1] - ((alpha * s[1]) - (beta * c[1])); + + table[i + 1][0] = (float) c[0]; + table[i + 1][1] = (float) -s[0]; + table[table_size - i - 1][0] = (float) s[0]; + table[table_size - i - 1][1] = (float) -c[0]; + } +} + static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { @@ -252,12 +304,9 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) m = leaf_N << (n_luts - 2); tmp = FFTS_MALLOC(m * sizeof(ffts_cpx_32f), 32); - for (i = 0; i < m; i++) { - tmp[i][0] = W_re(4*m, i); - tmp[i][1] = W_im(4*m, i); - } + ffts_generate_cosine_sine_32f(tmp, m); - /* generate lookup tables */ + /* generate lookup tables */ stride = 1 << (n_luts - 1); for (i = 0; i < n_luts; i++) { p->ws_is[i] = w - (ffts_cpx_32f*) p->ws; diff --git a/src/ffts_internal.h b/src/ffts_internal.h index f992811..60de539 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -223,14 +223,4 @@ static __inline unsigned 
long ffts_ctzl(size_t N) #endif /* _WIN64 */ #endif /* _MSC_VER */ -static FFTS_ALWAYS_INLINE float W_re(float N, float k) -{ - return cos(-2.0 * M_PI * k / N); -} - -static FFTS_ALWAYS_INLINE float W_im(float N, float k) -{ - return sin(-2.0 * M_PI * k / N); -} - #endif /* FFTS_INTERNAL_H */ -- cgit v1.1 From 502acf15c749a6d32680a5843da79934f2d985d4 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 2 Jul 2015 16:52:05 +0300 Subject: Fix assertion failure in ffts_compare_offsets --- src/patterns.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/patterns.c b/src/patterns.c index 17133d1..04bdccb 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -159,11 +159,12 @@ static void ffts_elaborate_offsets(ptrdiff_t *offsets, int leafN, int N, int iof } } -static int ffts_compare_offsets(const void *a, const void *b) +static int +ffts_compare_offsets(const void *pa, const void *pb) { - ptrdiff_t diff = ((ptrdiff_t*) a)[0] - ((ptrdiff_t*) b)[0]; - assert(diff > INT_MIN && diff < INT_MAX); - return (int) diff; + const ptrdiff_t a = *(const ptrdiff_t*) pa; + const ptrdiff_t b = *(const ptrdiff_t*) pb; + return (a > b) - (a < b); } ptrdiff_t *ffts_init_offsets(size_t N, size_t leaf_N) -- cgit v1.1 From 8f0c8c7f7243a1255cfcc4d63a169c21a61200fc Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 2 Jul 2015 17:04:44 +0300 Subject: Incorrect stride with GCC flags "-march=native -ffast-math" Note that N/leaf_N is always a power of 2 --- src/ffts_internal.h | 5 +++-- src/patterns.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 60de539..18b3bd5 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -79,7 +79,8 @@ #define M_PI 3.1415926535897932384626433832795028841971693993751058209 #endif -typedef void (*transform_func_t)(ffts_plan_t *p, const void *in, void *out); +struct _ffts_plan_t; +typedef void (*transform_func_t)(struct _ffts_plan_t *p, const void *in, void *out); /** * Contains all the Information need to perform FFT @@ -172,7 +173,7 @@ struct _ffts_plan_t { * to clean up the plan after use * (differs for real and multi dimension transforms */ - void (*destroy)(ffts_plan_t *); + void (*destroy)(struct _ffts_plan_t *); /** * Coefficiants for the real valued transforms */ diff --git a/src/patterns.c b/src/patterns.c index 04bdccb..158ff89 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -32,6 +32,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include "patterns.h" +#include "ffts_internal.h" #include #include @@ -113,7 +114,7 @@ static void ffts_hardcodedleaf_is_rec(ptrdiff_t **is, int big_N, int N, int poff ptrdiff_t *ffts_init_is(size_t N, size_t leaf_N, int VL) { int i, i0, i1, i2; - int stride = (int) (log(N/leaf_N) / log(2)); + int stride = ffts_ctzl(N/leaf_N); ptrdiff_t *is, *pis; is = malloc(N / VL * sizeof(*is)); -- cgit v1.1 From b481f5980e000a4ff5e123e8165e52b6dea6ea54 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 6 Jul 2015 11:51:59 +0300 Subject: Fix ffts_aligned_free MinGW crash --- src/ffts_internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 18b3bd5..2d1dbd3 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -194,7 +194,7 @@ static FFTS_INLINE void *ffts_aligned_malloc(size_t size) static FFTS_INLINE void ffts_aligned_free(void *p) { -#if defined(_MSC_VER) +#if defined(_WIN32) _aligned_free(p); #else free(p); -- cgit v1.1 From ceb8e6aef7f0e406ff4724896a8138bf72911a68 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 6 Jul 2015 12:02:33 +0300 Subject: Avoid allocating array of single pointer --- src/ffts_real.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index 12c02b9..5522f6b 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -4,6 +4,7 @@ This file is part of FFTS -- The Fastest Fourier Transform in the South Copyright (c) 2012, Anthony M. Blake Copyright (c) 2012, The University of Waikato +Copyright (c) 2015, Jukka Ojanen All rights reserved. @@ -40,7 +41,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #endif -static void ffts_free_1d_real(ffts_plan_t *p) +static void +ffts_free_1d_real(ffts_plan_t *p) { if (p->B) { ffts_aligned_free(p->B); @@ -54,9 +56,8 @@ static void ffts_free_1d_real(ffts_plan_t *p) ffts_aligned_free(p->buf); } - if (p->plans) { + if (p->plans[0]) { ffts_free(p->plans[0]); - free(p->plans); } free(p); @@ -207,12 +208,13 @@ static void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout p->plans[0]->transform(p->plans[0], buf, out); } -ffts_plan_t *ffts_init_1d_real(size_t N, int sign) +ffts_plan_t* +ffts_init_1d_real(size_t N, int sign) { ffts_plan_t *p; size_t i; - p = (ffts_plan_t*) calloc(1, sizeof(*p)); + p = (ffts_plan_t*) calloc(1, sizeof(*p) + sizeof(*p->plans)); if (!p) { return NULL; } @@ -226,11 +228,7 @@ ffts_plan_t *ffts_init_1d_real(size_t N, int sign) p->destroy = &ffts_free_1d_real; p->N = N; p->rank = 1; - - p->plans = (ffts_plan_t**) malloc(1 * sizeof(*p->plans)); - if (!p->plans) { - goto cleanup; - } + p->plans = (ffts_plan_t**) &p[1]; p->plans[0] = ffts_init_1d(N/2, sign); if (!p->plans[0]) { -- cgit v1.1 From fbcfb21e9de85b6443848c721523d3793ae668ff Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 6 Jul 2015 12:08:32 +0300 Subject: Add new attributes to help auto-vectorization --- src/ffts_attributes.h | 26 ++++++++++++++++++++++++-- src/ffts_real.c | 47 ++++++++++++++++++++++++++++++----------------- 2 files changed, 54 insertions(+), 19 deletions(-) diff --git a/src/ffts_attributes.h b/src/ffts_attributes.h index 6ac2ac3..763a6af 100644 --- a/src/ffts_attributes.h +++ b/src/ffts_attributes.h @@ -68,10 +68,32 @@ #define FFTS_INLINE inline #endif -#if defined(_MSC_VER) +#if defined(__GNUC__) +#define FFTS_RESTRICT __restrict +#elif defined(_MSC_VER) +#define FFTS_RESTRICT __restrict +#else #define FFTS_RESTRICT +#endif + +#if 
GCC_VERSION_AT_LEAST(4,5) +#define FFTS_ASSUME(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) +#elif defined(_MSC_VER) +#define FFTS_ASSUME(cond) __assume(cond) #else -#define FFTS_RESTRICT __restrict +#define FFTS_ASSUME(cond) +#endif + +#if GCC_VERSION_AT_LEAST(4,7) +#define FFTS_ASSUME_ALIGNED_16(x) __builtin_assume_aligned(x, 16) +#else +#define FFTS_ASSUME_ALIGNED_16(x) x +#endif + +#if GCC_VERSION_AT_LEAST(4,7) +#define FFTS_ASSUME_ALIGNED_32(x) __builtin_assume_aligned(x, 32) +#else +#define FFTS_ASSUME_ALIGNED_32(x) x #endif #endif /* FFTS_ATTRIBUTES_H */ diff --git a/src/ffts_real.c b/src/ffts_real.c index 5522f6b..82a9e79 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -63,13 +63,19 @@ ffts_free_1d_real(ffts_plan_t *p) free(p); } -static void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) +static void +ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) { - float *out = (float*) vout; - float *buf = (float*) p->buf; - float *A = p->A; - float *B = p->B; - size_t N = p->N; + float *const FFTS_RESTRICT out = + (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_16(output); + float *const FFTS_RESTRICT buf = + (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->buf); + const float *const FFTS_RESTRICT A = + (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->A); + const float *const FFTS_RESTRICT B = + (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->B); + const int N = (const int) p->N; + int i; #ifdef __ARM_NEON__ float *p_buf0 = buf; @@ -77,9 +83,10 @@ static void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) float *p_out = out; #endif - size_t i; + /* we know this */ + FFTS_ASSUME(N/2 > 0); - p->plans[0]->transform(p->plans[0], vin, buf); + p->plans[0]->transform(p->plans[0], input, buf); buf[N + 0] = buf[0]; buf[N + 1] = buf[1]; @@ -138,14 +145,19 @@ static void ffts_execute_1d_real(ffts_plan_t *p, const void *vin, void *vout) out[N + 1] = 0.0f; } -static void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout) +static void +ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) { - float *out = (float*) vout; - float *in = (float*) vin; - float *buf = (float*) p->buf; - float *A = p->A; - float *B = p->B; - size_t N = p->N; + float *const FFTS_RESTRICT in = + (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_16(input); + float *const FFTS_RESTRICT buf = + (float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->buf); + const float *const FFTS_RESTRICT A = + (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->A); + const float *const FFTS_RESTRICT B = + (const float *const FFTS_RESTRICT) FFTS_ASSUME_ALIGNED_32(p->B); + const int N = (const int) p->N; + int i; #ifdef __ARM_NEON__ float *p_buf0 = in; @@ -153,7 +165,8 @@ static void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout float *p_out = buf; #endif - size_t i; + /* we know this */ + FFTS_ASSUME(N/2 > 0); #ifdef __ARM_NEON__ for (i = 0; i < N/2; i += 2) { @@ -205,7 +218,7 @@ static void ffts_execute_1d_real_inv(ffts_plan_t *p, const void *vin, void *vout } #endif - p->plans[0]->transform(p->plans[0], buf, out); + p->plans[0]->transform(p->plans[0], buf, output); } ffts_plan_t* -- cgit v1.1 From 6bf4e36dd29a12136f018c208f830dbaac05f182 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 6 Jul 2015 12:10:17 +0300 Subject: SSE optimized versions of ffts_execute_1d_real and ffts_execute_1d_real_inv --- src/ffts_real.c | 104 
+++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 100 insertions(+), 4 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index 82a9e79..0dd24d8 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -134,10 +134,58 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } +#elif HAVE_SSE + if (N < 8) { + for (i = 0; i < N/2; i++) { + out[2*i + 0] = + buf[ 2*i + 0] * A[2*i + 0] - buf[ 2*i + 1] * A[2*i + 1] + + buf[N - 2*i + 0] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; + out[2*i + 1] = + buf[ 2*i + 1] * A[2*i + 0] + buf[ 2*i + 0] * A[2*i + 1] + + buf[N - 2*i + 0] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; + } + } else { + const __m128 c0 = _mm_set_ps(0.0f, -0.0f, 0.0f, -0.0f); + __m128 t0 = _mm_load_ps(buf); + + for (i = 0; i < N; i += 8) { + __m128 t1 = _mm_load_ps(buf + i); + __m128 t2 = _mm_load_ps(buf + N - i - 4); + __m128 t3 = _mm_load_ps(A + i); + __m128 t4 = _mm_load_ps(B + i); + + _mm_store_ps(out + i, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0), + _MM_SHUFFLE(2,3,0,1))))); + + t0 = _mm_load_ps(buf + N - i - 8); + t1 = _mm_load_ps(buf + i + 4); + t3 = _mm_load_ps(A + i + 4); + t4 = _mm_load_ps(B + i + 4); + + _mm_store_ps(out + i + 4, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0), + _MM_SHUFFLE(2,3,0,1))))); + } + } #else for (i = 0; i < N/2; i++) { - out[2*i + 0] = buf[2*i + 0] * A[2*i] - buf[2*i + 1] * A[2*i + 1] + buf[N - 2*i] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; - out[2*i + 1] = buf[2*i + 1] * A[2*i] + buf[2*i + 0] * A[2*i + 1] + buf[N - 2*i] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; + out[2*i + 0] = + buf[ 2*i + 0] * A[2*i + 0] - buf[ 2*i + 1] * A[2*i + 1] + + buf[N - 2*i + 0] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; + out[2*i + 1] = + buf[ 2*i + 1] * A[2*i + 0] + buf[ 2*i + 0] * A[2*i + 1] + + buf[N - 2*i + 0] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; } #endif @@ -211,10 +259,58 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } +#elif HAVE_SSE + if (N < 8) { + for (i = 0; i < N/2; i++) { + buf[2*i + 0] = + in[ 2*i + 0] * A[2*i + 0] + in[ 2*i + 1] * A[2*i + 1] + + in[N - 2*i + 0] * B[2*i + 0] - in[N - 2*i + 1] * B[2*i + 1]; + buf[2*i + 1] = + in[ 2*i + 1] * A[2*i + 0] - in[ 2*i + 0] * A[2*i + 1] - + in[N - 2*i + 0] * B[2*i + 1] - in[N - 2*i + 1] * B[2*i + 0]; + } + } else { + const __m128 c0 = _mm_set_ps(-0.0f, 0.0f, -0.0f, 0.0f); + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); + + for (i = 0; i < N; i += 8) { + __m128 t1 = _mm_load_ps(in + i); + __m128 t2 = _mm_load_ps(in + N - i - 4); + __m128 t3 = _mm_load_ps(A + i); + __m128 t4 = _mm_load_ps(B + i); + + _mm_store_ps(buf + i, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + 
_mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); + + t0 = _mm_load_ps(in + N - i - 8); + t1 = _mm_load_ps(in + i + 4); + t3 = _mm_load_ps(A + i + 4); + t4 = _mm_load_ps(B + i + 4); + + _mm_store_ps(buf + i + 4, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); + } + } #else for (i = 0; i < N/2; i++) { - buf[2*i + 0] = in[2*i + 0] * A[2*i] + in[2*i + 1] * A[2*i + 1] + in[N - 2*i] * B[2*i + 0] - in[N - 2*i + 1] * B[2*i + 1]; - buf[2*i + 1] = in[2*i + 1] * A[2*i] - in[2*i + 0] * A[2*i + 1] - in[N - 2*i] * B[2*i + 1] - in[N - 2*i + 1] * B[2*i + 0]; + buf[2*i + 0] = + in[ 2*i + 0] * A[2*i + 0] + in[ 2*i + 1] * A[2*i + 1] + + in[N - 2*i + 0] * B[2*i + 0] - in[N - 2*i + 1] * B[2*i + 1]; + buf[2*i + 1] = + in[ 2*i + 1] * A[2*i + 0] - in[ 2*i + 0] * A[2*i + 1] - + in[N - 2*i + 0] * B[2*i + 1] - in[N - 2*i + 1] * B[2*i + 0]; } #endif -- cgit v1.1 From a0aa64fdb9a5fcbdf8ad3edfe3f2b1bc4e37c770 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 6 Jul 2015 14:28:45 +0300 Subject: To silence warning 'possible loss of data', use explicit casting to float --- src/ffts_real.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index 0dd24d8..f3b5126 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -361,17 +361,17 @@ ffts_init_1d_real(size_t N, int sign) if (sign < 0) { for (i = 0; i < N/2; i++) { - p->A[2 * i + 0] = 0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i)); - p->A[2 * i + 1] = 0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i)); - p->B[2 * i + 0] = 0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i)); - p->B[2 * i + 1] = 0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + p->A[2 * i + 0] = (float) (0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); + p->A[2 * i + 1] = (float) (0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->B[2 * i + 0] = (float) (0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); + p->B[2 * i + 1] = (float) (0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); } } else { for (i = 0; i < N/2; i++) { - p->A[2 * i + 0] = 1.0 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i)); - p->A[2 * i + 1] = 1.0 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i)); - p->B[2 * i + 0] = 1.0 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i)); - p->B[2 * i + 1] = 1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i)); + p->A[2 * i + 0] = (float) (1.0 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); + p->A[2 * i + 1] = (float) (1.0 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->B[2 * i + 0] = (float) (1.0 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); + p->B[2 * i + 1] = (float) (1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); } } -- cgit v1.1 From 8c3b06d4790ef37d541212bdc689f5b0ecab7245 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: 
Tue, 7 Jul 2015 12:45:10 +0300 Subject: Add detection for SSE3 intrinsics --- CMakeLists.txt | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a1a897..43b6add 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,17 +131,35 @@ if(NOT CMAKE_CROSSCOMPILING) cmake_pop_check_state() else() - # check if the platform has support for SSE SIMD extension + # check if the platform has support for SSE intrinsics check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) if(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) endif(HAVE_XMMINTRIN_H) - # check if the platform has support for SSE2 SIMD extension + # check if the platform has support for SSE2 intrinsics check_include_file(emmintrin.h HAVE_EMMINTRIN_H) if(HAVE_EMMINTRIN_H) add_definitions(-DHAVE_SSE2) endif(HAVE_EMMINTRIN_H) + + # check if the platform has support for SSE3 intrinsics + check_include_file(pmmintrin.h HAVE_PMMINTRIN_H) + if(HAVE_PMMINTRIN_H) + add_definitions(-DHAVE_PMMINTRIN_H) + add_definitions(-DHAVE_SSE3) + else() + # check if the platform has specific intrinsics + check_include_file(intrin.h HAVE_INTRIN_H) + if(HAVE_INTRIN_H) + check_symbol_exists(_mm_addsub_ps intrin.h HAVE_DECL__MM_ADDSUB_PS) + if(HAVE_DECL__MM_ADDSUB_PS) + # assume that we have all SSE3 intrinsics + add_definitions(-DHAVE_INTRIN_H) + add_definitions(-DHAVE_SSE3) + endif(HAVE_DECL__MM_ADDSUB_PS) + endif(HAVE_INTRIN_H) + endif(HAVE_PMMINTRIN_H) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") else() # Check if we can always use detection code above? -- cgit v1.1 From ea0c10a22b233af7ef9ddd9bd6b71d3ab9208cff Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 7 Jul 2015 12:50:30 +0300 Subject: Add SSE3 optimized version of ffts_execute_1d_real --- src/ffts_real.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 80 insertions(+), 13 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index f3b5126..f3fbaae 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -39,6 +39,18 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
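The shuffle/xor chains in these kernels are all variations of a single idiom: multiplying interleaved complex floats with SSE, which has no complex-multiply instruction. A sketch of the core operation, assuming SSE3 (cmul2_sse3 is a hypothetical helper, not a function from the library):

#include <pmmintrin.h>

/* multiply two pairs of interleaved complex floats {re0, im0, re1, im1}:
 * _mm_moveldup_ps/_mm_movehdup_ps duplicate the real/imaginary parts of y,
 * and _mm_addsub_ps supplies the alternating subtract/add, so no sign-mask
 * constant is needed */
static __m128
cmul2_sse3(__m128 x, __m128 y)
{
    __m128 re = _mm_mul_ps(x, _mm_moveldup_ps(y));
    __m128 im = _mm_mul_ps(_mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1)),
                           _mm_movehdup_ps(y));

    /* lane 0: x.re*y.re - x.im*y.im, lane 1: x.im*y.re + x.re*y.im, ... */
    return _mm_addsub_ps(re, im);
}

Plain SSE has none of these three instructions, which is why the pre-SSE3 path XORs one operand with a {-0.0f, 0.0f, -0.0f, 0.0f} sign mask to negate alternate lanes before an ordinary _mm_add_ps.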
#include #elif HAVE_SSE #include + +/* check if have SSE3 intrinsics */ +#ifdef HAVE_PMMINTRIN_H +#include +#elif HAVE_INTRIN_H +#include +#else +/* avoid using negative zero as some configurations have problems with those */ +static const FFTS_ALIGN(16) unsigned int sign_mask[4] = { + 0x80000000, 0, 0x80000000, 0 +}; +#endif #endif static void @@ -88,8 +100,10 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) p->plans[0]->transform(p->plans[0], input, buf); +#ifndef HAVE_SSE buf[N + 0] = buf[0]; buf[N + 1] = buf[1]; +#endif #ifdef __ARM_NEON__ for (i = 0; i < N/2; i += 2) { @@ -134,18 +148,67 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -#elif HAVE_SSE +#elif HAVE_SSE3 if (N < 8) { - for (i = 0; i < N/2; i++) { - out[2*i + 0] = - buf[ 2*i + 0] * A[2*i + 0] - buf[ 2*i + 1] * A[2*i + 1] + - buf[N - 2*i + 0] * B[2*i + 0] + buf[N - 2*i + 1] * B[2*i + 1]; - out[2*i + 1] = - buf[ 2*i + 1] * A[2*i + 0] + buf[ 2*i + 0] * A[2*i + 1] + - buf[N - 2*i + 0] * B[2*i + 1] - buf[N - 2*i + 1] * B[2*i + 0]; + const __m128 t0 = _mm_load_ps(buf); + const __m128 t1 = _mm_load_ps(A); + const __m128 t2 = _mm_load_ps(B); + + _mm_store_ps(out, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t0, _mm_moveldup_ps(t1)), + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t1))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,2,0,0)), t2)))); + } else { + __m128 t0 = _mm_load_ps(buf); + + for (i = 0; i < N; i += 8) { + __m128 t1 = _mm_load_ps(buf + i); + __m128 t2 = _mm_load_ps(buf + N - i - 4); + __m128 t3 = _mm_load_ps(A + i); + __m128 t4 = _mm_load_ps(B + i); + + _mm_store_ps(out + i, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t0 = _mm_load_ps(buf + N - i - 8); + t1 = _mm_load_ps(buf + i + 4); + t3 = _mm_load_ps(A + i + 4); + t4 = _mm_load_ps(B + i + 4); + + _mm_store_ps(out + i + 4, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); } + } +#elif HAVE_SSE + if (N < 8) { + const __m128 c0 = _mm_load_ps((const float*) sign_mask); + const __m128 t0 = _mm_load_ps(buf); + const __m128 t1 = _mm_load_ps(A); + const __m128 t2 = _mm_load_ps(B); + + _mm_store_ps(out, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t0, _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,2,0,0)), t2)), + _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t2, c0), _mm_xor_ps(t2, c0), + _MM_SHUFFLE(2,3,0,1))))); } else { - const __m128 c0 = _mm_set_ps(0.0f, -0.0f, 0.0f, -0.0f); + const __m128 c0 = _mm_load_ps((const float*) sign_mask); __m128 t0 = _mm_load_ps(buf); for (i = 0; i < N; i += 8) { @@ -278,7 +341,7 @@ 
ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) __m128 t2 = _mm_load_ps(in + N - i - 4); __m128 t3 = _mm_load_ps(A + i); __m128 t4 = _mm_load_ps(B + i); - + _mm_store_ps(buf + i, _mm_add_ps(_mm_sub_ps(_mm_add_ps( _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), @@ -361,10 +424,14 @@ ffts_init_1d_real(size_t N, int sign) if (sign < 0) { for (i = 0; i < N/2; i++) { - p->A[2 * i + 0] = (float) (0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); - p->A[2 * i + 1] = (float) (0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->A[2 * i + 0] = (float) ( 0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); + p->A[2 * i + 1] = (float) ( 0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); +#ifdef HAVE_SSE3 + p->B[2 * i + 0] = (float) (-0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); +#else p->B[2 * i + 0] = (float) (0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); - p->B[2 * i + 1] = (float) (0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); +#endif + p->B[2 * i + 1] = (float) ( 0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); } } else { for (i = 0; i < N/2; i++) { -- cgit v1.1 From 95c783a2afd9a2e299812be2623fecd415bd1c41 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 7 Jul 2015 19:14:22 +0300 Subject: Add SSE3 optimized version of ffts_execute_1d_real_inv --- src/ffts_real.c | 98 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 20 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index f3fbaae..5c01103 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -47,9 +47,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #else /* avoid using negative zero as some configurations have problems with those */ -static const FFTS_ALIGN(16) unsigned int sign_mask[4] = { +static const FFTS_ALIGN(16) unsigned int sign_mask_even[4] = { 0x80000000, 0, 0x80000000, 0 }; +static const FFTS_ALIGN(16) unsigned int sign_mask_odd[4] = { + 0, 0x80000000, 0, 0x80000000 +}; #endif #endif @@ -150,9 +153,9 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) } #elif HAVE_SSE3 if (N < 8) { - const __m128 t0 = _mm_load_ps(buf); - const __m128 t1 = _mm_load_ps(A); - const __m128 t2 = _mm_load_ps(B); + __m128 t0 = _mm_load_ps(buf); + __m128 t1 = _mm_load_ps(A); + __m128 t2 = _mm_load_ps(B); _mm_store_ps(out, _mm_add_ps(_mm_addsub_ps( _mm_mul_ps(t0, _mm_moveldup_ps(t1)), @@ -194,10 +197,10 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) } #elif HAVE_SSE if (N < 8) { - const __m128 c0 = _mm_load_ps((const float*) sign_mask); - const __m128 t0 = _mm_load_ps(buf); - const __m128 t1 = _mm_load_ps(A); - const __m128 t2 = _mm_load_ps(B); + __m128 c0 = _mm_load_ps((const float*) sign_mask_even); + __m128 t0 = _mm_load_ps(buf); + __m128 t1 = _mm_load_ps(A); + __m128 t2 = _mm_load_ps(B); _mm_store_ps(out, _mm_add_ps(_mm_add_ps(_mm_add_ps( _mm_mul_ps(t0, _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,2,0,0))), @@ -208,7 +211,7 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) _mm_shuffle_ps(_mm_xor_ps(t2, c0), _mm_xor_ps(t2, c0), _MM_SHUFFLE(2,3,0,1))))); } else { - const __m128 c0 = _mm_load_ps((const float*) sign_mask); + __m128 c0 = _mm_load_ps((const float*) sign_mask_even); __m128 t0 = _mm_load_ps(buf); for (i = 0; i < N; i += 8) { @@ -322,18 +325,69 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) : 
"memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -#elif HAVE_SSE +#elif HAVE_SSE3 if (N < 8) { - for (i = 0; i < N/2; i++) { - buf[2*i + 0] = - in[ 2*i + 0] * A[2*i + 0] + in[ 2*i + 1] * A[2*i + 1] + - in[N - 2*i + 0] * B[2*i + 0] - in[N - 2*i + 1] * B[2*i + 1]; - buf[2*i + 1] = - in[ 2*i + 1] * A[2*i + 0] - in[ 2*i + 0] * A[2*i + 1] - - in[N - 2*i + 0] * B[2*i + 1] - in[N - 2*i + 1] * B[2*i + 0]; + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[4]); + __m128 t1 = _mm_load_ps(in); + __m128 t2 = _mm_load_ps(A); + __m128 t3 = _mm_load_ps(B); + + _mm_store_ps(buf, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t2)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t2))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3)))); + } else { + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); + + for (i = 0; i < N; i += 8) { + __m128 t1 = _mm_load_ps(in + i); + __m128 t2 = _mm_load_ps(in + N - i - 4); + __m128 t3 = _mm_load_ps(A + i); + __m128 t4 = _mm_load_ps(B + i); + + _mm_store_ps(buf + i, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t0 = _mm_load_ps(in + N - i - 8); + t1 = _mm_load_ps(in + i + 4); + t3 = _mm_load_ps(A + i + 4); + t4 = _mm_load_ps(B + i + 4); + + _mm_store_ps(buf + i + 4, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); } + } +#elif HAVE_SSE + if (N < 8) { + __m128 c0 = _mm_load_ps((const float*) sign_mask_odd); + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[4]); + __m128 t1 = _mm_load_ps(in); + __m128 t2 = _mm_load_ps(A); + __m128 t3 = _mm_load_ps(B); + + _mm_store_ps(buf, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t3, c0)))); } else { - const __m128 c0 = _mm_set_ps(-0.0f, 0.0f, -0.0f, 0.0f); + __m128 c0 = _mm_load_ps((const float*) sign_mask_odd); __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); for (i = 0; i < N; i += 8) { @@ -341,7 +395,7 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) __m128 t2 = _mm_load_ps(in + N - i - 4); __m128 t3 = _mm_load_ps(A + i); __m128 t4 = _mm_load_ps(B + i); - + _mm_store_ps(buf + i, _mm_add_ps(_mm_sub_ps(_mm_add_ps( _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), @@ -429,14 +483,18 @@ ffts_init_1d_real(size_t N, int sign) #ifdef HAVE_SSE3 p->B[2 * i + 0] = (float) (-0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); #else - p->B[2 * i + 0] = (float) 
(0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); + p->B[2 * i + 0] = (float) ( 0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); #endif p->B[2 * i + 1] = (float) ( 0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); } } else { for (i = 0; i < N/2; i++) { p->A[2 * i + 0] = (float) (1.0 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); +#ifdef HAVE_SSE3 + p->A[2 * i + 1] = (float) (1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); +#else p->A[2 * i + 1] = (float) (1.0 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); +#endif p->B[2 * i + 0] = (float) (1.0 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); p->B[2 * i + 1] = (float) (1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); } -- cgit v1.1 From ed8a12ca33ffa69604bc261e65a17ea6c04fbeb8 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 8 Jul 2015 16:38:58 +0300 Subject: Half the number of calls to sin/cos functions in ffts_init_1d_real --- src/ffts_real.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 68 insertions(+), 12 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index 5c01103..a737696 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -477,27 +477,83 @@ ffts_init_1d_real(size_t N, int sign) } if (sign < 0) { - for (i = 0; i < N/2; i++) { - p->A[2 * i + 0] = (float) ( 0.5 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); - p->A[2 * i + 1] = (float) ( 0.5 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + /* peel off the first */ + p->A[0] = 0.5f; + p->A[1] = -0.5f; #ifdef HAVE_SSE3 - p->B[2 * i + 0] = (float) (-0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); + p->B[0] = -0.5f; #else - p->B[2 * i + 0] = (float) ( 0.5 * ( 1.0 + sin(2.0 * M_PI / (double) N * (double) i))); + p->B[0] = 0.5f; +#endif + p->B[1] = 0.5f; + + for (i = 1; i < N/4; i++) { + float t0 = (float) (0.5 * (1.0 - sin(2.0 * M_PI / N * i))); + float t1 = (float) (0.5 * (1.0 * cos(2.0 * M_PI / N * i))); + float t2 = (float) (0.5 * (1.0 + sin(2.0 * M_PI / N * i))); + + p->A[ 2 * i + 0] = t0; + p->A[N - 2 * i + 0] = t0; + p->A[ 2 * i + 1] = -t1; + p->A[N - 2 * i + 1] = t1; + +#ifdef HAVE_SSE3 + p->B[ 2 * i + 0] = -t2; + p->B[N - 2 * i + 0] = -t2; +#else + p->B[ 2 * i + 0] = t2; + p->B[N - 2 * i + 0] = t2; #endif - p->B[2 * i + 1] = (float) ( 0.5 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->B[ 2 * i + 1] = t1; + p->B[N - 2 * i + 1] = -t1; } + + /* and the last */ + p->A[2 * i + 0] = 0.0f; + p->A[2 * i + 1] = 0.0f; +#ifdef HAVE_SSE3 + p->B[2 * i + 0] = -1.0f; +#else + p->B[2 * i + 0] = 1.0f; +#endif + p->B[2 * i + 1] = 0.0f; } else { - for (i = 0; i < N/2; i++) { - p->A[2 * i + 0] = (float) (1.0 * ( 1.0 - sin(2.0 * M_PI / (double) N * (double) i))); + /* peel of the first */ + p->A[0] = 1.0f; +#ifdef HAVE_SSE3 + p->A[1] = 1.0f; +#else + p->A[1] = -1.0f; +#endif + p->B[0] = 1.0f; + p->B[1] = 1.0f; + + for (i = 1; i < N/4; i++) { + float t0 = (float) (1.0 * (1.0 - sin(2.0 * M_PI / N * i))); + float t1 = (float) (1.0 * (1.0 * cos(2.0 * M_PI / N * i))); + float t2 = (float) (1.0 * (1.0 + sin(2.0 * M_PI / N * i))); + + p->A[ 2 * i + 0] = t0; + p->A[N - 2 * i + 0] = t0; #ifdef HAVE_SSE3 - p->A[2 * i + 1] = (float) (1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->A[ 2 * i + 1] = t1; + p->A[N - 2 * i + 1] = -t1; #else - p->A[2 * i + 1] = (float) (1.0 * (-1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + p->A[ 2 * i + 1] = -t1; + p->A[N - 2 * i + 1] = t1; #endif - p->B[2 * i + 0] = (float) (1.0 * ( 1.0 + sin(2.0 * 
M_PI / (double) N * (double) i))); - p->B[2 * i + 1] = (float) (1.0 * ( 1.0 * cos(2.0 * M_PI / (double) N * (double) i))); + + p->B[ 2 * i + 0] = t2; + p->B[N - 2 * i + 0] = t2; + p->B[ 2 * i + 1] = t1; + p->B[N - 2 * i + 1] = -t1; } + + /* and the last */ + p->A[2 * i + 0] = 0.0f; + p->A[2 * i + 1] = 0.0f; + p->B[2 * i + 0] = 2.0f; + p->B[2 * i + 1] = 0.0f; } return p; -- cgit v1.1 From 06eb1e603f9527c1cf205d630fa5c58bd808f9fb Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 9 Jul 2015 15:30:18 +0300 Subject: Add new attributes to control/improve branch predictions --- src/ffts_attributes.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/ffts_attributes.h b/src/ffts_attributes.h index 763a6af..bdfd616 100644 --- a/src/ffts_attributes.h +++ b/src/ffts_attributes.h @@ -96,4 +96,16 @@ #define FFTS_ASSUME_ALIGNED_32(x) x #endif +#if defined(__GNUC__) +#define FFTS_LIKELY(cond) __builtin_expect(!!(cond), 1) +#else +#define FFTS_LIKELY(cond) cond +#endif + +#if defined(__GNUC__) +#define FFTS_UNLIKELY(cond) __builtin_expect(!!(cond), 0) +#else +#define FFTS_UNLIKELY(cond) cond +#endif + #endif /* FFTS_ATTRIBUTES_H */ -- cgit v1.1 From 7e018bb933d5291155739614e422773c4c2d8781 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 9 Jul 2015 15:37:53 +0300 Subject: Unroll loops to process 64 byte cache line per iteration --- src/ffts_real.c | 244 +++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 205 insertions(+), 39 deletions(-) diff --git a/src/ffts_real.c b/src/ffts_real.c index a737696..0327f15 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -152,22 +152,36 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) ); } #elif HAVE_SSE3 - if (N < 8) { + if (FFTS_UNLIKELY(N <= 8)) { __m128 t0 = _mm_load_ps(buf); - __m128 t1 = _mm_load_ps(A); - __m128 t2 = _mm_load_ps(B); + __m128 t1 = _mm_load_ps(buf + N - 4); + __m128 t2 = _mm_load_ps(A); + __m128 t3 = _mm_load_ps(B); _mm_store_ps(out, _mm_add_ps(_mm_addsub_ps( - _mm_mul_ps(t0, _mm_moveldup_ps(t1)), + _mm_mul_ps(t0, _mm_moveldup_ps(t2)), _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)), - _mm_movehdup_ps(t1))), _mm_addsub_ps( - _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(3,3,1,1)), - _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1))), - _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,2,0,0)), t2)))); + _mm_movehdup_ps(t2))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3)))); + + if (N == 8) { + t2 = _mm_load_ps(A + 4); + t3 = _mm_load_ps(B + 4); + + _mm_store_ps(out + 4, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t2)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t2))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(2,2,0,0)), t3)))); + } } else { __m128 t0 = _mm_load_ps(buf); - for (i = 0; i < N; i += 8) { + for (i = 0; i < N; i += 16) { __m128 t1 = _mm_load_ps(buf + i); __m128 t2 = _mm_load_ps(buf + N - i - 4); __m128 t3 = _mm_load_ps(A + i); @@ -193,28 +207,69 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t1 = _mm_load_ps(buf + i + 8); + t2 = _mm_load_ps(buf + N - i - 
12); + t3 = _mm_load_ps(A + i + 8); + t4 = _mm_load_ps(B + i + 8); + + _mm_store_ps(out + i + 8, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t0 = _mm_load_ps(buf + N - i - 16); + t1 = _mm_load_ps(buf + i + 12); + t3 = _mm_load_ps(A + i + 12); + t4 = _mm_load_ps(B + i + 12); + + _mm_store_ps(out + i + 12, _mm_add_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); } } #elif HAVE_SSE - if (N < 8) { + if (FFTS_UNLIKELY(N <= 8)) { __m128 c0 = _mm_load_ps((const float*) sign_mask_even); __m128 t0 = _mm_load_ps(buf); - __m128 t1 = _mm_load_ps(A); - __m128 t2 = _mm_load_ps(B); + __m128 t1 = _mm_load_ps(buf + N - 4); + __m128 t2 = _mm_load_ps(A); + __m128 t3 = _mm_load_ps(B); _mm_store_ps(out, _mm_add_ps(_mm_add_ps(_mm_add_ps( - _mm_mul_ps(t0, _mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(t0, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))), _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,3,0,1)), - _mm_xor_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(3,3,1,1)), c0))), - _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(2,2,0,0)), t2)), - _mm_mul_ps(_mm_shuffle_ps(t0, t0, _MM_SHUFFLE(3,3,1,1)), - _mm_shuffle_ps(_mm_xor_ps(t2, c0), _mm_xor_ps(t2, c0), + _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3)), + _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t3, c0), _mm_xor_ps(t3, c0), _MM_SHUFFLE(2,3,0,1))))); + + if (N == 8) { + t2 = _mm_load_ps(A + 4); + t3 = _mm_load_ps(B + 4); + + _mm_store_ps(out + 4, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(2,2,0,0)), t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t3, c0), _mm_xor_ps(t3, c0), + _MM_SHUFFLE(2,3,0,1))))); + } } else { __m128 c0 = _mm_load_ps((const float*) sign_mask_even); __m128 t0 = _mm_load_ps(buf); - for (i = 0; i < N; i += 8) { + for (i = 0; i < N; i += 16) { __m128 t1 = _mm_load_ps(buf + i); __m128 t2 = _mm_load_ps(buf + N - i - 4); __m128 t3 = _mm_load_ps(A + i); @@ -242,6 +297,34 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0), _MM_SHUFFLE(2,3,0,1))))); + + t1 = _mm_load_ps(buf + i + 8); + t2 = _mm_load_ps(buf + N - i - 12); + t3 = _mm_load_ps(A + i + 8); + t4 = _mm_load_ps(B + i + 8); + + _mm_store_ps(out + i + 8, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + 
_mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0), + _MM_SHUFFLE(2,3,0,1))))); + + t0 = _mm_load_ps(buf + N - i - 16); + t1 = _mm_load_ps(buf + i + 12); + t3 = _mm_load_ps(A + i + 12); + t4 = _mm_load_ps(B + i + 12); + + _mm_store_ps(out + i + 12, _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(_mm_xor_ps(t4, c0), _mm_xor_ps(t4, c0), + _MM_SHUFFLE(2,3,0,1))))); } } #else @@ -326,23 +409,37 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) ); } #elif HAVE_SSE3 - if (N < 8) { - __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[4]); + if (FFTS_UNLIKELY(N <= 8)) { + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); __m128 t1 = _mm_load_ps(in); - __m128 t2 = _mm_load_ps(A); - __m128 t3 = _mm_load_ps(B); + __m128 t2 = _mm_load_ps(in + N - 4); + __m128 t3 = _mm_load_ps(A); + __m128 t4 = _mm_load_ps(B); _mm_store_ps(buf, _mm_sub_ps(_mm_addsub_ps( - _mm_mul_ps(t1, _mm_moveldup_ps(t2)), + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), - _mm_movehdup_ps(t2))), _mm_addsub_ps( - _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), - _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1))), - _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), t3)))); + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)))); + + if (N == 8) { + t3 = _mm_load_ps(A + 4); + t4 = _mm_load_ps(B + 4); + + _mm_store_ps(buf + 4, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t2, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2,2,0,0)), t4)))); + } } else { __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); - for (i = 0; i < N; i += 8) { + for (i = 0; i < N; i += 16) { __m128 t1 = _mm_load_ps(in + i); __m128 t2 = _mm_load_ps(in + N - i - 4); __m128 t3 = _mm_load_ps(A + i); @@ -368,29 +465,70 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t1 = _mm_load_ps(in + i + 8); + t2 = _mm_load_ps(in + N - i - 12); + t3 = _mm_load_ps(A + i + 8); + t4 = _mm_load_ps(B + i + 8); + + _mm_store_ps(buf + i + 8, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), t4)))); + + t0 = _mm_load_ps(in + N - i - 16); + t1 = _mm_load_ps(in + i + 12); + t3 = _mm_load_ps(A + i + 12); + t4 = _mm_load_ps(B + i + 12); + + _mm_store_ps(buf + i + 12, _mm_sub_ps(_mm_addsub_ps( + _mm_mul_ps(t1, _mm_moveldup_ps(t3)), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + 
_mm_movehdup_ps(t3))), _mm_addsub_ps( + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), t4)))); } } #elif HAVE_SSE - if (N < 8) { + if (FFTS_UNLIKELY(N <= 8)) { __m128 c0 = _mm_load_ps((const float*) sign_mask_odd); - __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[4]); + __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); __m128 t1 = _mm_load_ps(in); - __m128 t2 = _mm_load_ps(A); - __m128 t3 = _mm_load_ps(B); + __m128 t2 = _mm_load_ps(in + N - 4); + __m128 t3 = _mm_load_ps(A); + __m128 t4 = _mm_load_ps(B); _mm_store_ps(buf, _mm_add_ps(_mm_sub_ps(_mm_add_ps( - _mm_mul_ps(t1, _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), - _mm_xor_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(3,3,1,1)), c0))), - _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(3,3,1,1)), - _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,3,0,1)))), - _mm_mul_ps(_mm_shuffle_ps(t0, t1, _MM_SHUFFLE(2,2,0,0)), - _mm_xor_ps(t3, c0)))); + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); + + if (N == 8) { + t3 = _mm_load_ps(A + 4); + t4 = _mm_load_ps(B + 4); + + _mm_store_ps(buf + 4, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(t2, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t2, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t2, t1, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); + } } else { __m128 c0 = _mm_load_ps((const float*) sign_mask_odd); __m128 t0 = _mm_loadl_pi(_mm_setzero_ps(), (const __m64*) &in[N]); - for (i = 0; i < N; i += 8) { + for (i = 0; i < N; i += 16) { __m128 t1 = _mm_load_ps(in + i); __m128 t2 = _mm_load_ps(in + N - i - 4); __m128 t3 = _mm_load_ps(A + i); @@ -418,6 +556,34 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), _mm_xor_ps(t4, c0)))); + + t1 = _mm_load_ps(in + i + 8); + t2 = _mm_load_ps(in + N - i - 12); + t3 = _mm_load_ps(A + i + 8); + t4 = _mm_load_ps(B + i + 8); + + _mm_store_ps(buf + i + 8, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + _mm_mul_ps(_mm_shuffle_ps(t0, t2, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); + + t0 = _mm_load_ps(in + N - i - 16); + t1 = _mm_load_ps(in + i + 12); + t3 = _mm_load_ps(A + i + 12); + t4 = _mm_load_ps(B + i + 12); + + _mm_store_ps(buf + i + 12, _mm_add_ps(_mm_sub_ps(_mm_add_ps( + _mm_mul_ps(t1, _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(2,2,0,0))), + _mm_mul_ps(_mm_shuffle_ps(t1, t1, _MM_SHUFFLE(2,3,0,1)), + _mm_xor_ps(_mm_shuffle_ps(t3, t3, _MM_SHUFFLE(3,3,1,1)), c0))), + _mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(3,3,1,1)), + _mm_shuffle_ps(t4, t4, _MM_SHUFFLE(2,3,0,1)))), + 
_mm_mul_ps(_mm_shuffle_ps(t2, t0, _MM_SHUFFLE(2,2,0,0)), + _mm_xor_ps(t4, c0)))); } } #else -- cgit v1.1 From 6d3047f0ada0b931df9f6c1d49f037931c3c67f3 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 10 Jul 2015 11:01:39 +0300 Subject: SSE3 detection failed with MSVC 2005 x64 --- CMakeLists.txt | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 43b6add..4c683af 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,6 +31,7 @@ option(ENABLE_SHARED "Enable building a shared library." OFF ) +include(CheckCSourceCompiles) include(CheckIncludeFile) include(CheckSymbolExists) include(CMakePushCheckState) @@ -152,12 +153,20 @@ if(NOT CMAKE_CROSSCOMPILING) # check if the platform has specific intrinsics check_include_file(intrin.h HAVE_INTRIN_H) if(HAVE_INTRIN_H) - check_symbol_exists(_mm_addsub_ps intrin.h HAVE_DECL__MM_ADDSUB_PS) - if(HAVE_DECL__MM_ADDSUB_PS) + check_c_source_compiles(" + #include + int main(int argc, char** argv) + { + (void) argv; + (void) argc; + return _mm_movemask_ps(_mm_moveldup_ps(_mm_set_ss(1.0f))); + }" HAVE__MM_MOVELDUP_PS + ) + if(HAVE__MM_MOVELDUP_PS) # assume that we have all SSE3 intrinsics add_definitions(-DHAVE_INTRIN_H) add_definitions(-DHAVE_SSE3) - endif(HAVE_DECL__MM_ADDSUB_PS) + endif(HAVE__MM_MOVELDUP_PS) endif(HAVE_INTRIN_H) endif(HAVE_PMMINTRIN_H) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") -- cgit v1.1 From 9885d87c6335d5b688bdf6b90e78de1add605d63 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 14 Jul 2015 17:08:14 +0300 Subject: Move trigonometric stuff to separate file. Implemented Oscar Buneman's method for generating a sequence of sines and cosines. --- CMakeLists.txt | 2 + src/ffts.c | 55 +------------- src/ffts_real.c | 2 +- src/ffts_trig.c | 221 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ src/ffts_trig.h | 48 ++++++++++++ 5 files changed, 274 insertions(+), 54 deletions(-) create mode 100644 src/ffts_trig.c create mode 100644 src/ffts_trig.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 4c683af..db53dd5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -218,6 +218,8 @@ set(FFTS_SOURCES src/ffts_real.c src/ffts_real_nd.c src/ffts_real_nd.h + src/ffts_trig.c + src/ffts_trig.h src/ffts_static.c src/ffts_static.h src/macros.h diff --git a/src/ffts.c b/src/ffts.c index f9cb9bb..6e12563 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -35,6 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
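The FFTS_UNLIKELY(N <= 8) guards added with the unrolled loops above exist to keep the hot path straight: __builtin_expect only influences block layout, placing the rarely taken small-N case out of line so the 64-byte-per-iteration loop falls through. A self-contained sketch of the pattern (checked_sum is hypothetical, not library code; the macro is the same definition as in ffts_attributes.h above):

#if defined(__GNUC__)
#define FFTS_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#else
#define FFTS_UNLIKELY(cond) cond
#endif

static float
checked_sum(const float *x, int n)
{
    float acc = 0.0f;
    int i;

    if (FFTS_UNLIKELY(!x || n <= 0)) {
        return 0.0f; /* cold path, moved out of the fall-through */
    }

    for (i = 0; i < n; i++) {
        acc += x[i];
    }

    return acc;
}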
#include "ffts_internal.h" #include "ffts_static.h" +#include "ffts_trig.h" #include "macros.h" #include "patterns.h" @@ -199,58 +200,6 @@ void ffts_free_1d(ffts_plan_t *p) free(p); } -static void -ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size) -{ - double alpha, beta; - double c[2], s[2]; - int i; - - double x = 1.0 / table_size; - double z = x * x; - - /* polynomial approximations calculated using Sollya */ - - /* alpha = 2 * sin(M_PI_4 / m) * sin(M_PI_4 / m) */ - alpha = x * (1.1107207345394952717884501203293686870741139540138 + - z * (-0.114191397993514079911985272577099412137126013186879 + - z * 3.52164670852685621720746817665316575239342815885835e-3)); - alpha = alpha * alpha; - - /* beta = sin(M_PI_2 / m) */ - beta = x * (1.57079632679489455959753740899031981825828552246094 + - z * (-0.64596409735041482313988581154262647032737731933593 + - z * 7.9690915468332887416913479228242067620158195495605e-2)); - - /* cos(0) = 1.0, sin(0) = 0.0 */ - c[0] = 1.0; - s[0] = 0.0; - - table[ 0][0] = 1.0f; - table[ 0][1] = 0.0f; - table[table_size - 1][1] = -1.0f; - table[table_size - 1][0] = -0.0f; - - /* generate sine and cosine table with maximum error less than 1 ULP */ - for (i = 1; i < table_size/2; i += 2) { - c[1] = c[0] - ((alpha * c[0]) + (beta * s[0])); - s[1] = s[0] - ((alpha * s[0]) - (beta * c[0])); - - table[i + 0][0] = (float) c[1]; - table[i + 0][1] = (float) -s[1]; - table[table_size - i][0] = (float) s[1]; - table[table_size - i][1] = (float) -c[1]; - - c[0] = c[1] - ((alpha * c[1]) + (beta * s[1])); - s[0] = s[1] - ((alpha * s[1]) - (beta * c[1])); - - table[i + 1][0] = (float) c[0]; - table[i + 1][1] = (float) -s[0]; - table[table_size - i - 1][0] = (float) s[0]; - table[table_size - i - 1][1] = (float) -c[0]; - } -} - static int ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) { @@ -304,7 +253,7 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) m = leaf_N << (n_luts - 2); tmp = FFTS_MALLOC(m * sizeof(ffts_cpx_32f), 32); - ffts_generate_cosine_sine_32f(tmp, m); + ffts_generate_cosine_sine_pow2_32f(tmp, m); /* generate lookup tables */ stride = 1 << (n_luts - 1); diff --git a/src/ffts_real.c b/src/ffts_real.c index 0327f15..f1355c7 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -650,7 +650,7 @@ ffts_init_1d_real(size_t N, int sign) p->B[0] = -0.5f; #else p->B[0] = 0.5f; -#endif +#endif p->B[1] = 0.5f; for (i = 1; i < N/4; i++) { diff --git a/src/ffts_trig.c b/src/ffts_trig.c new file mode 100644 index 0000000..8af96b9 --- /dev/null +++ b/src/ffts_trig.c @@ -0,0 +1,221 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2015, Jukka Ojanen + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "ffts_trig.h" + +/* 1/(2*cos(pow(2,-p)*pi)) */ +static const FFTS_ALIGN(16) unsigned int half_secant[66] = { + 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, + 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, + 0x00000001, 0x3fe00000, 0x00000005, 0x3fe00000, 0x00000014, 0x3fe00000, + 0x0000004f, 0x3fe00000, 0x0000013c, 0x3fe00000, 0x000004ef, 0x3fe00000, + 0x000013bd, 0x3fe00000, 0x00004ef5, 0x3fe00000, 0x00013bd4, 0x3fe00000, + 0x0004ef4f, 0x3fe00000, 0x0013bd3d, 0x3fe00000, 0x004ef4f3, 0x3fe00000, + 0x013bd3cd, 0x3fe00000, 0x04ef4f34, 0x3fe00000, 0x13bd3cde, 0x3fe00000, + 0x4ef4f46c, 0x3fe00000, 0x3bd3e0e7, 0x3fe00001, 0xef507722, 0x3fe00004, + 0xbd5114f9, 0x3fe00013, 0xf637de7d, 0x3fe0004e, 0xe8190891, 0x3fe0013b, + 0x9436640e, 0x3fe004f0, 0x9c61d971, 0x3fe013d1, 0xd17cba53, 0x3fe0503e, + 0x7bdb3895, 0x3fe1517a, 0x00000000, 0x00000000, 0x00000000, 0x00000000 +}; + +/* cos(pow(2,-p)*pi), sin(pow(2,-p)*pi) */ +static const FFTS_ALIGN(16) unsigned int cos_sin_pi_table[132] = { + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e0921fb, 0x00000000, 0x3ff00000, + 0x54442d18, 0x3e0921fb, 0x00000000, 0x3ff00000, 0x54442d18, 0x3e1921fb, + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e2921fb, 0x00000000, 0x3ff00000, + 0x54442d18, 0x3e3921fb, 0xffffffff, 0x3fefffff, 0x54442d18, 0x3e4921fb, + 0xfffffffe, 0x3fefffff, 0x54442d18, 0x3e5921fb, 0xfffffff6, 0x3fefffff, + 0x54442d16, 0x3e6921fb, 0xffffffd9, 0x3fefffff, 0x54442d0e, 0x3e7921fb, + 0xffffff62, 0x3fefffff, 0x54442cef, 0x3e8921fb, 0xfffffd88, 0x3fefffff, + 0x54442c73, 0x3e9921fb, 0xfffff621, 0x3fefffff, 0x54442a83, 0x3ea921fb, + 0xffffd886, 0x3fefffff, 0x544422c2, 0x3eb921fb, 0xffff6216, 0x3fefffff, + 0x544403c1, 0x3ec921fb, 0xfffd8858, 0x3fefffff, 0x544387ba, 0x3ed921fb, + 0xfff62162, 0x3fefffff, 0x544197a1, 0x3ee921fb, 0xffd88586, 0x3fefffff, + 0x5439d73a, 0x3ef921fb, 0xff62161a, 0x3fefffff, 0x541ad59e, 0x3f0921fb, + 0xfd885867, 0x3fefffff, 0x539ecf31, 0x3f1921fb, 0xf621619c, 0x3fefffff, + 0x51aeb57c, 0x3f2921fb, 0xd8858675, 0x3fefffff, 0x49ee4ea6, 0x3f3921fb, + 0x62161a34, 0x3fefffff, 0x2aecb360, 0x3f4921fb, 0x88586ee6, 0x3feffffd, + 0xaee6472e, 0x3f5921fa, 0x21621d02, 0x3feffff6, 0xbecca4ba, 0x3f6921f8, + 0x858e8a92, 0x3fefffd8, 0xfe670071, 0x3f7921f0, 0x169b92db, 0x3fefff62, + 0xfcdec784, 0x3f8921d1, 0x6084cd0d, 0x3feffd88, 0xf7a3667e, 0x3f992155, + 0xe3796d7e, 0x3feff621, 0xf10dd814, 0x3fa91f65, 0xa3d12526, 0x3fefd88d, + 0xbc29b42c, 0x3fb917a6, 0xcff75cb0, 0x3fef6297, 0x3c69a60b, 0x3fc8f8b8, + 0xcf328d46, 0x3fed906b, 0xa6aea963, 0x3fd87de2, 0x667f3bcd, 0x3fe6a09e, + 0x667f3bcd, 0x3fe6a09e, 0x00000000, 0x00000000, 0x00000000, 0x3ff00000 +}; + +int +ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size) +{ + double alpha, beta; + double 
c[2], s[2]; + double x, z; + int i; + + if (!table || !table_size) { + return -1; + } + + /* the first */ + table[0][0] = 1.0f; + table[0][1] = -0.0f; + + if (FFTS_UNLIKELY(table_size == 1)) { + goto exit; + } + + if (FFTS_UNLIKELY(table_size == 2)) { + /* skip over */ + i = 1; + goto mid_point; + } + + /* polynomial approximations calculated using Sollya */ + x = 1.0 / table_size; + z = x * x; + + /* alpha = 2 * sin(M_PI_4 / m) * sin(M_PI_4 / m) */ + alpha = x * (1.1107207345394952717884501203293686870741139540138 + + z * (-0.114191397993514079911985272577099412137126013186879 + + z * 3.52164670852685621720746817665316575239342815885835e-3)); + alpha = alpha * alpha; + + /* beta = sin(M_PI_2 / m) */ + beta = x * (1.57079632679489455959753740899031981825828552246094 + + z * (-0.64596409735041482313988581154262647032737731933593 + + z * 7.9690915468332887416913479228242067620158195495605e-2)); + + /* cos(0) = 1.0, sin(0) = 0.0 */ + c[0] = 1.0; + s[0] = 0.0; + + /* generate sine and cosine table with maximum error less than 1 ULP */ + for (i = 1; i < (table_size + 1)/2; i++) { + c[1] = c[0] - ((alpha * c[0]) + (beta * s[0])); + s[1] = s[0] - ((alpha * s[0]) - (beta * c[0])); + + table[i + 0][0] = (float) c[1]; + table[i + 0][1] = (float) -s[1]; + table[table_size - i][0] = (float) s[1]; + table[table_size - i][1] = (float) -c[1]; + + c[0] = c[1]; + s[0] = s[1]; + } + + if (FFTS_UNLIKELY(table_size & 1)) { + goto exit; + } + +mid_point: + table[i][0] = 0.70710677f; + table[i][1] = -0.70710677f; + +exit: + return 0; +} + +/* Oscar Buneman's method for generating a sequence of sines and cosines. +* Expired US Patent 4,878,187 A +* +* D. Potts, G. Steidl, M. Tasche, Numerical stability of fast +* trigonometric transforms — a worst case study, +* J. Concrete Appl. Math. 1 (2003) 1–36 +* +* O. Buneman, Stable on–line creation of sines and cosines of +* successive angles, Proc. IEEE 75, 1434 – 1435 (1987). 
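Both table generators rest on exact trigonometric identities. In ffts_generate_cosine_sine_32f above, alpha = 2*sin^2(theta/2) = 1 - cos(theta) and beta = sin(theta), so the update c' = c - (alpha*c + beta*s), s' = s - (alpha*s - beta*c) is algebraically an exact rotation by theta (c' = c*cos(theta) - s*sin(theta)); only rounding, not the formulation, contributes error. Buneman's scheme below instead fills the table by repeated interval halving: the value midway between two already-known angles is their sum scaled by a precomputed half-secant of the gap, which is what the half_secant[] constants above hold. A tiny standalone check of that identity (plain C with libm, not library code):

#include <math.h>
#include <stdio.h>

/* sum-to-product: cos(a) + cos(b) = 2*cos((a+b)/2)*cos((a-b)/2), so the
 * midpoint cosine is (cos(a) + cos(b)) / (2*cos((b-a)/2)) */
int main(void)
{
    double a = 0.3, b = 0.7;
    double hs = 1.0 / (2.0 * cos(0.5 * (b - a)));

    printf("%.17g\n", hs * (cos(a) + cos(b))); /* equals cos(0.5*(a+b)) */
    printf("%.17g\n", cos(0.5 * (a + b)));
    return 0;
}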
+*/ +int +ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) +{ + const ffts_cpx_64f *FFTS_RESTRICT ct; + const double *FFTS_RESTRICT hs; + ffts_cpx_64f FFTS_ALIGN(16) w[32]; + int i, log_2, offset; + + /* size must be a power of two */ + if (!table || !table_size || (table_size & (table_size - 1))) { + return -1; + } + + /* the first */ + table[0][0] = 1.0f; + table[0][1] = -0.0f; + + if (FFTS_UNLIKELY(table_size == 1)) { + goto exit; + } + + if (FFTS_UNLIKELY(table_size == 2)) { + /* skip over */ + i = 1; + goto mid_point; + } + + /* calculate table offset */ + FFTS_ASSUME(table_size/2 > 1); + log_2 = ffts_ctzl(table_size); + FFTS_ASSUME(log_2 > 1); + offset = 32 - log_2; + ct = (const ffts_cpx_64f*) + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[4 * offset]); + hs = (const double*) &half_secant[2 * offset]; + + /* initialize from table */ + for (i = 0; i <= log_2; i++) { + w[i][0] = ct[i][0]; + w[i][1] = ct[i][1]; + } + + /* generate sine and cosine table with maximum error less than 0.5 ULP */ + for (i = 1; i < table_size/2; i++) { + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + table[i + 0][0] = (float) w[log_2][0]; + table[i + 0][1] = (float) -w[log_2][1]; + table[table_size - i][0] = (float) w[log_2][1]; + table[table_size - i][1] = (float) -w[log_2][0]; + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + } + +mid_point: + table[i][0] = 0.70710677f; + table[i][1] = -0.70710677f; + +exit: + return 0; +} \ No newline at end of file diff --git a/src/ffts_trig.h b/src/ffts_trig.h new file mode 100644 index 0000000..258c176 --- /dev/null +++ b/src/ffts_trig.h @@ -0,0 +1,48 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2015, Jukka Ojanen + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef FFTS_TRIG_H +#define FFTS_TRIG_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + +#include "ffts_internal.h" + +int +ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size); + +int +ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size); + +#endif /* FFTS_TRIG_H */ -- cgit v1.1 From f571c435ceccc56e79c024f626a91c57f52d94ff Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 14 Jul 2015 23:51:35 +0300 Subject: FFTS is no longer depended on any other math library, and this should help to verify its numerical accuracy. --- src/ffts_internal.h | 1 - src/ffts_real.c | 80 ++------------------------------ src/ffts_trig.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++++-- src/ffts_trig.h | 5 ++ src/patterns.c | 1 - 5 files changed, 135 insertions(+), 82 deletions(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 2d1dbd3..59f46f8 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -40,7 +40,6 @@ #include "types.h" #include -#include #include #ifdef HAVE_STDINT_H diff --git a/src/ffts_real.c b/src/ffts_real.c index f1355c7..f6e6127 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ffts_real.h" #include "ffts_internal.h" +#include "ffts_trig.h" #ifdef HAVE_NEON #include @@ -604,7 +605,6 @@ ffts_plan_t* ffts_init_1d_real(size_t N, int sign) { ffts_plan_t *p; - size_t i; p = (ffts_plan_t*) calloc(1, sizeof(*p) + sizeof(*p->plans)); if (!p) { @@ -642,85 +642,11 @@ ffts_init_1d_real(size_t N, int sign) goto cleanup; } - if (sign < 0) { - /* peel off the first */ - p->A[0] = 0.5f; - p->A[1] = -0.5f; -#ifdef HAVE_SSE3 - p->B[0] = -0.5f; -#else - p->B[0] = 0.5f; -#endif - p->B[1] = 0.5f; - - for (i = 1; i < N/4; i++) { - float t0 = (float) (0.5 * (1.0 - sin(2.0 * M_PI / N * i))); - float t1 = (float) (0.5 * (1.0 * cos(2.0 * M_PI / N * i))); - float t2 = (float) (0.5 * (1.0 + sin(2.0 * M_PI / N * i))); - - p->A[ 2 * i + 0] = t0; - p->A[N - 2 * i + 0] = t0; - p->A[ 2 * i + 1] = -t1; - p->A[N - 2 * i + 1] = t1; - #ifdef HAVE_SSE3 - p->B[ 2 * i + 0] = -t2; - p->B[N - 2 * i + 0] = -t2; + ffts_generate_table_1d_real_32f(p, sign, 1); #else - p->B[ 2 * i + 0] = t2; - p->B[N - 2 * i + 0] = t2; + ffts_generate_table_1d_real_32f(p, sign, 0); #endif - p->B[ 2 * i + 1] = t1; - p->B[N - 2 * i + 1] = -t1; - } - - /* and the last */ - p->A[2 * i + 0] = 0.0f; - p->A[2 * i + 1] = 0.0f; -#ifdef HAVE_SSE3 - p->B[2 * i + 0] = -1.0f; -#else - p->B[2 * i + 0] = 1.0f; -#endif - p->B[2 * i + 1] = 0.0f; - } else { - /* peel of the first */ - p->A[0] = 1.0f; -#ifdef HAVE_SSE3 - p->A[1] = 1.0f; -#else - p->A[1] = -1.0f; -#endif - p->B[0] = 1.0f; - p->B[1] = 1.0f; - - for (i = 1; i < N/4; i++) { - float t0 = (float) (1.0 * (1.0 - sin(2.0 * M_PI / N * i))); - float t1 = (float) (1.0 * (1.0 * cos(2.0 * M_PI / N * i))); - float t2 = (float) (1.0 * (1.0 + sin(2.0 * M_PI / N * i))); - - p->A[ 2 * i + 0] = t0; - p->A[N - 2 * i + 0] = t0; -#ifdef HAVE_SSE3 - p->A[ 2 * i + 1] = t1; - p->A[N - 2 * i + 1] = -t1; -#else - p->A[ 2 * i + 1] = -t1; - p->A[N - 2 * i + 1] = t1; -#endif - - p->B[ 2 * i + 0] = t2; - p->B[N - 2 * i + 0] = t2; - p->B[ 2 * i + 1] = t1; - p->B[N - 2 * i + 1] = -t1; - } - - /* and the last */ - p->A[2 * i + 0] = 0.0f; - p->A[2 * i + 1] = 0.0f; - p->B[2 * i + 0] = 2.0f; - p->B[2 * i + 1] = 0.0f; - } return p; diff --git a/src/ffts_trig.c b/src/ffts_trig.c index 8af96b9..514a1e5 100644 --- a/src/ffts_trig.c +++ 
b/src/ffts_trig.c @@ -118,7 +118,7 @@ ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size) c[0] = 1.0; s[0] = 0.0; - /* generate sine and cosine table with maximum error less than 1 ULP */ + /* generate sine and cosine tables with maximum error less than 1 ULP */ for (i = 1; i < (table_size + 1)/2; i++) { c[1] = c[0] - ((alpha * c[0]) + (beta * s[0])); s[1] = s[0] - ((alpha * s[0]) - (beta * c[0])); @@ -190,13 +190,13 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[4 * offset]); hs = (const double*) &half_secant[2 * offset]; - /* initialize from table */ + /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { w[i][0] = ct[i][0]; w[i][1] = ct[i][1]; } - /* generate sine and cosine table with maximum error less than 0.5 ULP */ + /* generate sine and cosine tables with maximum error less than 0.5 ULP */ for (i = 1; i < table_size/2; i++) { /* calculate trailing zeros in index */ log_2 = ffts_ctzl(i); @@ -218,4 +218,128 @@ mid_point: exit: return 0; +} + +int +ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, + int sign, + int invert) +{ + const ffts_cpx_64f *FFTS_RESTRICT ct; + const double *FFTS_RESTRICT hs; + ffts_cpx_64f FFTS_ALIGN(16) w[32]; + int i, log_2, offset, N; + float *A, *B; + + if (!p) { + return -1; + } + + A = (float*) FFTS_ASSUME_ALIGNED_32(p->A); + B = (float*) FFTS_ASSUME_ALIGNED_32(p->B); + N = (int) p->N; + + /* the first */ + if (sign < 0) { + A[0] = 0.5f; + A[1] = -0.5f; + B[0] = invert ? -0.5f : 0.5f; + B[1] = 0.5f; + } else { + /* peel of the first */ + A[0] = 1.0f; + A[1] = invert ? 1.0f : -1.0f; + B[0] = 1.0f; + B[1] = 1.0f; + } + + if (FFTS_UNLIKELY(N == 4)) { + i = 1; + goto last; + } + + /* calculate table offset */ + FFTS_ASSUME(N / 4 > 1); + log_2 = ffts_ctzl(N); + FFTS_ASSUME(log_2 > 2); + offset = 34 - log_2; + ct = (const ffts_cpx_64f*) + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[4 * offset]); + hs = (const double*) &half_secant[2 * offset]; + + /* initialize from lookup table */ + for (i = 0; i <= log_2; i++) { + w[i][0] = ct[i][0]; + w[i][1] = ct[i][1]; + } + + /* generate sine and cosine tables with maximum error less than 0.5 ULP */ + if (sign < 0) { + for (i = 1; i < N/4; i++) { + float t0, t1, t2; + + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + t0 = (float) (0.5 * (1.0 - w[log_2][1])); + t1 = (float) (0.5 * w[log_2][0]); + t2 = (float) (0.5 * (1.0 + w[log_2][1])); + + A[ 2 * i + 0] = t0; + A[N - 2 * i + 0] = t0; + A[ 2 * i + 1] = -t1; + A[N - 2 * i + 1] = t1; + + B[ 2 * i + 0] = invert ? -t2 : t2; + B[N - 2 * i + 0] = invert ? -t2 : t2; + B[ 2 * i + 1] = t1; + B[N - 2 * i + 1] = -t1; + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + } + } else { + for (i = 1; i < N/4; i++) { + float t0, t1, t2; + + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + t0 = (float) (1.0 - w[log_2][1]); + t1 = (float) w[log_2][0]; + t2 = (float) (1.0 + w[log_2][1]); + + A[ 2 * i + 0] = t0; + A[N - 2 * i + 0] = t0; + A[ 2 * i + 1] = invert ? t1 : -t1; + A[N - 2 * i + 1] = invert ? 
-t1 : t1; + + B[ 2 * i + 0] = t2; + B[N - 2 * i + 0] = t2; + B[ 2 * i + 1] = t1; + B[N - 2 * i + 1] = -t1; + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + } + } + +last: + if (sign < 0) { + A[2 * i + 0] = 0.0f; + A[2 * i + 1] = 0.0f; + B[2 * i + 0] = invert ? -1.0f : 1.0f; + B[2 * i + 1] = 0.0f; + } else { + A[2 * i + 0] = 0.0f; + A[2 * i + 1] = 0.0f; + B[2 * i + 0] = 2.0f; + B[2 * i + 1] = 0.0f; + } + + return 0; } \ No newline at end of file diff --git a/src/ffts_trig.h b/src/ffts_trig.h index 258c176..cfed2fb 100644 --- a/src/ffts_trig.h +++ b/src/ffts_trig.h @@ -45,4 +45,9 @@ ffts_generate_cosine_sine_32f(ffts_cpx_32f *const table, int table_size); int ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size); +int +ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, + int sign, + int invert); + #endif /* FFTS_TRIG_H */ diff --git a/src/patterns.c b/src/patterns.c index 158ff89..be89265 100644 --- a/src/patterns.c +++ b/src/patterns.c @@ -37,7 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include #include -#include #ifdef HAVE_STDLIB_H #include -- cgit v1.1 From e8ec1ae614ecb4cbed7de0ecb298e2979bf39f13 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 15 Jul 2015 00:39:51 +0300 Subject: Remove some dead code --- src/ffts_internal.h | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 59f46f8..912a198 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -52,21 +52,6 @@ #include -#define FFTS_PREFIX ffts - -#ifndef FFTS_CAT_PREFIX2 -#define FFTS_CAT_PREFIX2(a,b) a ## b -#endif - -#ifndef FFTS_CAT_PREFIX -#define FFTS_CAT_PREFIX(a,b) FFTS_CAT_PREFIX2(a ## _, b) -#endif - -/* prevent symbol name clashes */ -#ifdef FFTS_PREFIX -#define FUNC_TO_REWRITE FFTS_CAT_PREFIX(FFTS_PREFIX, FUNC_TO_REWRITE) -#endif - #ifdef __ANDROID__ #include #define LOG(s) __android_log_print(ANDROID_LOG_ERROR, "FFTS", s) @@ -74,10 +59,6 @@ #define LOG(s) fprintf(stderr, s) #endif -#ifndef M_PI -#define M_PI 3.1415926535897932384626433832795028841971693993751058209 -#endif - struct _ffts_plan_t; typedef void (*transform_func_t)(struct _ffts_plan_t *p, const void *in, void *out); -- cgit v1.1 From 7738a0953c40c6143252018fde2f4b75a10db66e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 15 Jul 2015 17:10:36 +0300 Subject: Improve compiler optimization by turning "patterns.c" to "patterns.h" --- CMakeLists.txt | 1 - src/patterns.c | 202 ---------------------- src/patterns.h | 536 +++++++++++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 506 insertions(+), 233 deletions(-) delete mode 100644 src/patterns.c diff --git a/CMakeLists.txt b/CMakeLists.txt index db53dd5..5b85fb1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -223,7 +223,6 @@ set(FFTS_SOURCES src/ffts_static.c src/ffts_static.h src/macros.h - src/patterns.c src/patterns.h src/types.h ) diff --git a/src/patterns.c b/src/patterns.c deleted file mode 100644 index be89265..0000000 --- a/src/patterns.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - -This file is part of FFTS -- The Fastest Fourier Transform in the South - -Copyright (c) 2012, Anthony M. Blake -Copyright (c) 2012, The University of Waikato - -All rights reserved. 
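ffts_generate_table_1d_real_32f above packs the classic real-FFT post-processing twiddles: with Z the length-N/2 complex transform of the packed real input and w = exp(-2*pi*I*k/N), the forward combine step computed by the earlier kernels is X_k = A_k*Z_k + B_k*conj(Z_{N/2-k}), where A_k = (1 - I*w)/2 and B_k = (1 + I*w)/2. The sine/cosine symmetry of w is why one trig pair per index fills four table slots through the mirrored writes at N - 2*i, and the HAVE_SSE3 branches simply fold an extra sign flip into the stored tables so the addsub kernels need no sign-mask constant. A reference sketch (make_AB is hypothetical; assumes C99 complex and M_PI from math.h, which the library itself now avoids):

#include <complex.h>
#include <math.h>

/* expanding A_k and B_k reproduces the 0.5*(1 -/+ sin) and -/+0.5*cos
 * values written by ffts_generate_table_1d_real_32f for the forward
 * transform, without the SSE3 sign folding */
static void
make_AB(int N, float complex *A, float complex *B)
{
    int k;

    for (k = 0; k < N / 2; k++) {
        double complex w = cexp(-2.0 * M_PI * I * (double) k / (double) N);

        A[k] = (float complex) (0.5 * (1.0 - I * w));
        B[k] = (float complex) (0.5 * (1.0 + I * w));
    }
}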
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of the organization nor the -names of its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - -#include "patterns.h" -#include "ffts_internal.h" - -#include -#include -#include - -#ifdef HAVE_STDLIB_H -#include -#endif - -static void ffts_permute_addr(int N, int offset, int stride, int *d) -{ - int a[4] = {0,2,1,3}; - int i; - - for (i = 0; i < 4; i++) { - d[i] = offset + (a[i] << stride); - if (d[i] < 0) { - d[i] += N; - } - } -} - -static void ffts_hardcodedleaf_is_rec(ptrdiff_t **is, int big_N, int N, int poffset, int offset, int stride, int even, int VL) -{ - if (N > 4) { - ffts_hardcodedleaf_is_rec(is, big_N, N/2, poffset, offset, stride + 1, even, VL); - - if (N/4 >= 4) { - ffts_hardcodedleaf_is_rec(is, big_N, N/4, poffset + (1 << stride), offset + (N/2), stride + 2, 0, VL); - ffts_hardcodedleaf_is_rec(is, big_N, N/4, poffset - (1 << stride), offset + (3*N/4), stride + 2, 0, VL); - } else { - int temp = poffset + (1 << stride); - - if (temp < 0) { - temp += big_N; - } - - temp *= 2; - - if (!(temp % (2 * VL))) { - int i; - - (*is)[0] = poffset + (1 << stride); - (*is)[1] = poffset + (1 << stride) + (1 << (stride + 2)); - (*is)[2] = poffset - (1 << stride); - (*is)[3] = poffset - (1 << stride) + (1 << (stride + 2)); - - for (i = 0; i < 4; i++) { - if ((*is)[i] < 0) { - (*is)[i] += big_N; - } - } - - for (i = 0; i < 4; i++) { - (*is)[i] *= 2; - } - - *is += 4; - } - } - } else if (N == 4) { - int perm[4]; - - ffts_permute_addr(big_N, poffset, stride, perm); - - if (!((2 * perm[0]) % (2 * VL))) { - int i; - - for (i = 0; i < 4; i++) { - (*is)[i] = 2 * perm[i]; - } - - *is += 4; - } - } -} - -ptrdiff_t *ffts_init_is(size_t N, size_t leaf_N, int VL) -{ - int i, i0, i1, i2; - int stride = ffts_ctzl(N/leaf_N); - ptrdiff_t *is, *pis; - - is = malloc(N / VL * sizeof(*is)); - if (!is) { - return NULL; - } - - i0 = N/leaf_N/3 + 1; - i1 = i2 = N/leaf_N/3; - if ((N/leaf_N) % 3 > 1) { - i1++; - } - - pis = is; - for (i = 0; i < i0; i++) { - ffts_hardcodedleaf_is_rec(&pis, N, leaf_N, i, 0, stride, 1, VL); - } - - for (i = i0; i < i0 + i1; i++) { - ffts_hardcodedleaf_is_rec(&pis, N, leaf_N / 2, i, 0, stride + 1, 1, VL); - ffts_hardcodedleaf_is_rec(&pis, N, leaf_N / 2, i - (1 << stride), 0, 
stride + 1, 1, VL); - } - - for (i = 0 - i2; i < 0; i++) { - ffts_hardcodedleaf_is_rec(&pis, N, leaf_N, i, 0, stride, 1, VL); - } - - return is; -} - -static void ffts_elaborate_offsets(ptrdiff_t *offsets, int leafN, int N, int ioffset, int ooffset, int stride, int even) -{ - if ((even && N == leafN) || (!even && N <= leafN)) { - offsets[2 * (ooffset / leafN) + 0] = ioffset * 2; - offsets[2 * (ooffset / leafN) + 1] = ooffset; - } else if (N > 4) { - ffts_elaborate_offsets(offsets, leafN, N/2, ioffset, ooffset, stride + 1, even); - ffts_elaborate_offsets(offsets, leafN, N/4, ioffset + (1<= leafN) { - ffts_elaborate_offsets(offsets, leafN, N/4, ioffset - (1< b) - (a < b); -} - -ptrdiff_t *ffts_init_offsets(size_t N, size_t leaf_N) -{ - ptrdiff_t *offsets, *tmp; - size_t i; - - offsets = malloc(N/leaf_N * sizeof(*offsets)); - if (!offsets) { - return NULL; - } - - tmp = malloc(2 * N/leaf_N * sizeof(*tmp)); - if (!tmp) { - free(offsets); - return NULL; - } - - ffts_elaborate_offsets(tmp, leaf_N, N, 0, 0, 1, 1); - - for (i = 0; i < 2*N/leaf_N; i += 2) { - if (tmp[i] < 0) { - tmp[i] = N + tmp[i]; - } - } - - qsort(tmp, N/leaf_N, 2 * sizeof(*tmp), ffts_compare_offsets); - - for (i = 0; i < N/leaf_N; i++) { - offsets[i] = 2 * tmp[2*i + 1]; - } - - free(tmp); - return offsets; -} \ No newline at end of file diff --git a/src/patterns.h b/src/patterns.h index d172651..1a98540 100644 --- a/src/patterns.h +++ b/src/patterns.h @@ -1,33 +1,34 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato +Copyright (c) 2015, Jukka Ojanen + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @@ -38,9 +39,484 @@ #pragma once #endif +#include "ffts_internal.h" + #include -ptrdiff_t *ffts_init_is(size_t N, size_t leaf_N, int VL); -ptrdiff_t *ffts_init_offsets(size_t N, size_t leaf_N); +#ifdef HAVE_STDLIB_H +#include +#endif + +#ifndef LEAF_N +#define LEAF_N 8 +#endif + +#if LEAF_N == 8 +static void +ffts_elaborate_offsets_even8(ptrdiff_t *const offsets, + int log_N); + +static void +ffts_elaborate_offsets_odd8(ptrdiff_t *const offsets, + int log_N, + int input_offset, + int output_offset, + int stride); + +static void +ffts_hardcodedleaf_is_rec_even4(ptrdiff_t **is, + int big_N, + int offset, + int stride, + int VL); + +static void +ffts_hardcodedleaf_is_rec_even8(ptrdiff_t **is, + int big_N, + int offset, + int stride, + int VL); +#else +static void +ffts_elaborate_offsets_even(ptrdiff_t *const offsets, + int leaf_N, + int N, + int input_offset, + int output_offset, + int stride); + +static void +ffts_elaborate_offsets_odd(ptrdiff_t *const offsets, + int leaf_N, + int N, + int input_offset, + int output_offset, + int stride); + +static void +ffts_hardcodedleaf_is_rec_even(ptrdiff_t **is, + int big_N, + int N, + int offset, + int stride, + int VL); + +static void +ffts_hardcodedleaf_is_rec_odd(ptrdiff_t **is, + int big_N, + int N, + int offset, + int stride, + int VL); +#endif + +static int +ffts_compare_offsets(const void *pa, const void *pb) +{ + const ptrdiff_t a = *(const ptrdiff_t*) pa; + const ptrdiff_t b = *(const ptrdiff_t*) pb; + return (a > b) - (a < b); +} + +static void +ffts_permute_addr(int N, int offset, int stride, int *const d) +{ + int a[4] = {0,2,1,3}; + int i; + + for (i = 0; i < 4; i++) { + d[i] = offset + (a[i] << stride); + if (d[i] < 0) { + d[i] += N; + } + } +} + +#if LEAF_N == 8 +static void +ffts_elaborate_offsets_even8(ptrdiff_t *const offsets, int log_N) +{ + int offset = 1 << (log_N - 4); + int stride = 1; + + offsets[0] = 0; + offsets[1] = 0; + offsets[2] = offset * 2; + offsets[3] = 8; + offsets[4] = offset; + offsets[5] = 16; + offsets[6] = -offset; + offsets[7] = 24; + + for(; log_N > 5; --log_N, stride *= 2) { + ffts_elaborate_offsets_odd8(offsets, 
log_N - 2, + stride, 1 << (log_N - 1), stride * 4); + + ffts_elaborate_offsets_odd8(offsets, log_N - 2, + -stride, 3 * (1 << (log_N - 2)), stride * 4); + } +} + +static void +ffts_elaborate_offsets_odd8(ptrdiff_t *const offsets, + int log_N, + int input_offset, + int output_offset, + int stride) +{ + if (log_N <= 4) { + offsets[(output_offset / 4) + 0] = input_offset * 2; + offsets[(output_offset / 4) + 1] = output_offset; + + if (log_N == 4) { + offsets[(output_offset / 4) + 2] = (input_offset + stride) * 2; + offsets[(output_offset / 4) + 3] = output_offset + 8; + } + } else { + ffts_elaborate_offsets_odd8(offsets, log_N - 1, input_offset, + output_offset, stride * 2); + + ffts_elaborate_offsets_odd8(offsets, log_N - 2, input_offset + stride, + output_offset + (1 << (log_N - 1)), stride * 4); + + ffts_elaborate_offsets_odd8(offsets, log_N - 2, input_offset - stride, + output_offset + 3 * (1 << (log_N - 2)), stride * 4); + } +} + +static void +ffts_hardcodedleaf_is_rec_even4(ptrdiff_t **is, + int big_N, + int offset, + int stride, + int VL) +{ + int i, perm[4]; + + ffts_permute_addr(big_N, offset, stride, perm); + + if (!((2 * perm[0]) % (2 * VL))) { + for (i = 0; i < 4; i++) { + (*is)[i] = 2 * perm[i]; + } + + *is += 4; + } +} + +static void +ffts_hardcodedleaf_is_rec_even8(ptrdiff_t **is, + int big_N, + int offset, + int stride, + int VL) +{ + int temp; + + ffts_hardcodedleaf_is_rec_even4(is, big_N, offset, stride + 1, VL); + + temp = offset + (1 << stride); + if (temp < 0) { + temp += big_N; + } + + temp *= 2; + + if (!(temp % (2 * VL))) { + int i; + + (*is)[0] = offset + (1 << stride); + (*is)[1] = offset + (1 << stride) + (1 << (stride + 2)); + (*is)[2] = offset - (1 << stride); + (*is)[3] = offset - (1 << stride) + (1 << (stride + 2)); + + for (i = 0; i < 4; i++) { + if ((*is)[i] < 0) { + (*is)[i] += big_N; + } + } + + for (i = 0; i < 4; i++) { + (*is)[i] *= 2; + } + + *is += 4; + } +} +#else +static void +ffts_elaborate_offsets_even(ptrdiff_t *const offsets, + int leaf_N, + int N, + int input_offset, + int output_offset, + int stride) +{ + if (N == leaf_N) { + offsets[2 * (output_offset / leaf_N) + 0] = input_offset * 2; + offsets[2 * (output_offset / leaf_N) + 1] = output_offset; + } else if (N > 4) { + ffts_elaborate_offsets_even(offsets, leaf_N, + N/2, input_offset, output_offset, stride * 2); + + ffts_elaborate_offsets_odd(offsets, leaf_N, + N/4, input_offset + stride, output_offset + N/2, stride * 4); + + if (N/4 >= leaf_N) { + ffts_elaborate_offsets_odd(offsets, leaf_N, + N/4, input_offset - stride, output_offset + 3*N/4, stride * 4); + } + } +} + +static void +ffts_elaborate_offsets_odd(ptrdiff_t *const offsets, + int leaf_N, + int N, + int input_offset, + int output_offset, + int stride) +{ + if (N <= leaf_N) { + offsets[2 * (output_offset / leaf_N) + 0] = input_offset * 2; + offsets[2 * (output_offset / leaf_N) + 1] = output_offset; + } else if (N > 4) { + ffts_elaborate_offsets_odd(offsets, leaf_N, N/2, + input_offset, output_offset, stride * 2); + + ffts_elaborate_offsets_odd(offsets, leaf_N, N/4, + input_offset + stride, output_offset + N/2, stride * 4); + + if (N/4 >= leaf_N) { + ffts_elaborate_offsets_odd(offsets, leaf_N, N/4, + input_offset - stride, output_offset + 3*N/4, stride * 4); + } + } +} + +static void +ffts_hardcodedleaf_is_rec_even(ptrdiff_t **is, + int big_N, + int N, + int offset, + int stride, + int VL) +{ + if (N > 4) { + ffts_hardcodedleaf_is_rec_even(is, big_N, N/2, offset, stride + 1, VL); + + if (N/4 >= 4) { + ffts_hardcodedleaf_is_rec_odd( + is, 
big_N, N/4, offset + (1 << stride), stride + 2, VL); + ffts_hardcodedleaf_is_rec_odd( + is, big_N, N/4, offset - (1 << stride), stride + 2, VL); + } else { + int temp = offset + (1 << stride); + + if (temp < 0) { + temp += big_N; + } + + temp *= 2; + + if (!(temp % (2 * VL))) { + int i; + + (*is)[0] = offset + (1 << stride); + (*is)[1] = offset + (1 << stride) + (1 << (stride + 2)); + (*is)[2] = offset - (1 << stride); + (*is)[3] = offset - (1 << stride) + (1 << (stride + 2)); + + for (i = 0; i < 4; i++) { + if ((*is)[i] < 0) { + (*is)[i] += big_N; + } + } + + for (i = 0; i < 4; i++) { + (*is)[i] *= 2; + } + + *is += 4; + } + } + } else if (N == 4) { + int perm[4]; + + ffts_permute_addr(big_N, offset, stride, perm); + + if (!((2 * perm[0]) % (2 * VL))) { + int i; + + for (i = 0; i < 4; i++) { + (*is)[i] = 2 * perm[i]; + } + + *is += 4; + } + } +} + +static void +ffts_hardcodedleaf_is_rec_odd(ptrdiff_t **is, + int big_N, + int N, + int offset, + int stride, + int VL) +{ + if (N > 4) { + ffts_hardcodedleaf_is_rec_odd(is, big_N, N/2, offset, stride + 1, VL); + + if (N/4 >= 4) { + ffts_hardcodedleaf_is_rec_odd( + is, big_N, N/4, offset + (1 << stride), stride + 2, VL); + ffts_hardcodedleaf_is_rec_odd( + is, big_N, N/4, offset - (1 << stride), stride + 2, VL); + } else { + int temp = offset + (1 << stride); + + if (temp < 0) { + temp += big_N; + } + + temp *= 2; + + if (!(temp % (2 * VL))) { + int i; + + (*is)[0] = offset + (1 << stride); + (*is)[1] = offset + (1 << stride) + (1 << (stride + 2)); + (*is)[2] = offset - (1 << stride); + (*is)[3] = offset - (1 << stride) + (1 << (stride + 2)); + + for (i = 0; i < 4; i++) { + if ((*is)[i] < 0) { + (*is)[i] += big_N; + } + } + + for (i = 0; i < 4; i++) { + (*is)[i] *= 2; + } + + *is += 4; + } + } + } else if (N == 4) { + int perm[4]; + + ffts_permute_addr(big_N, offset, stride, perm); + + if (!((2 * perm[0]) % (2 * VL))) { + int i; + + for (i = 0; i < 4; i++) { + (*is)[i] = 2 * perm[i]; + } + + *is += 4; + } + } +} +#endif + +static ptrdiff_t* +ffts_init_is(size_t N, size_t leaf_N, int VL) +{ + int i, i0, i1, i2; + int stride = ffts_ctzl(N/leaf_N); + ptrdiff_t *is, *pis; + + is = malloc(N / VL * sizeof(*is)); + if (!is) { + return NULL; + } + + i0 = N/leaf_N/3 + 1; + i1 = i2 = N/leaf_N/3; + if ((N/leaf_N) % 3 > 1) { + i1++; + } + + pis = is; + +#if LEAF_N == 8 + for (i = 0; i < i0; i++) { + ffts_hardcodedleaf_is_rec_even8( + &pis, N, i, stride, VL); + } + + for (i = i0; i < i0 + i1; i++) { + ffts_hardcodedleaf_is_rec_even4( + &pis, N, i, stride + 1, VL); + ffts_hardcodedleaf_is_rec_even4( + &pis, N, i - (1 << stride), stride + 1, VL); + } + + for (i = 0 - i2; i < 0; i++) { + ffts_hardcodedleaf_is_rec_even8( + &pis, N, i, stride, VL); + } +#else + for (i = 0; i < i0; i++) { + ffts_hardcodedleaf_is_rec_even( + &pis, N, leaf_N, i, stride, VL); + } + + for (i = i0; i < i0 + i1; i++) { + ffts_hardcodedleaf_is_rec_even( + &pis, N, leaf_N / 2, i, stride + 1, VL); + ffts_hardcodedleaf_is_rec_even( + &pis, N, leaf_N / 2, i - (1 << stride), stride + 1, VL); + } + + for (i = 0 - i2; i < 0; i++) { + ffts_hardcodedleaf_is_rec_even( + &pis, N, leaf_N, i, stride, VL); + } +#endif + + return is; +} + +static ptrdiff_t* +ffts_init_offsets(size_t N, size_t leaf_N) +{ + ptrdiff_t *offsets, *tmp; + size_t i; + + offsets = malloc(N/leaf_N * sizeof(*offsets)); + if (!offsets) { + return NULL; + } + + tmp = malloc(2 * N/leaf_N * sizeof(*tmp)); + if (!tmp) { + free(offsets); + return NULL; + } + +#if LEAF_N == 8 + ffts_elaborate_offsets_even8(tmp, ffts_ctzl(N)); +#else + 
ffts_elaborate_offsets_even(tmp, leaf_N, N, 0, 0, 1); +#endif + + for (i = 0; i < 2*N/leaf_N; i += 2) { + if (tmp[i] < 0) { + tmp[i] += N; + } + } + + qsort(tmp, N/leaf_N, 2 * sizeof(*tmp), ffts_compare_offsets); + + for (i = 0; i < N/leaf_N; i++) { + offsets[i] = 2 * tmp[2*i + 1]; + } + + free(tmp); + return offsets; +} #endif /* FFTS_PATTERNS_H */ -- cgit v1.1 From 5ecfa4aad36caa0d03fe5b738b82eeeb6e024f26 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 15 Jul 2015 17:12:45 +0300 Subject: Remove unreferenced header --- src/patterns.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/patterns.h b/src/patterns.h index 1a98540..69bbe76 100644 --- a/src/patterns.h +++ b/src/patterns.h @@ -39,8 +39,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #pragma once #endif -#include "ffts_internal.h" - #include #ifdef HAVE_STDLIB_H -- cgit v1.1 From cb35f8927bc8c6992d41efcc3b972f2d8ee318dc Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 16 Jul 2015 10:45:32 +0300 Subject: Define [pa] and [pb] as constant input variables, not writable outputs --- src/ffts_internal.h | 1 - src/ffts_real.c | 12 +++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 912a198..14d037d 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -35,7 +35,6 @@ #define FFTS_INTERNAL_H //#include "config.h" -//#include "codegen.h" #include "ffts_attributes.h" #include "types.h" diff --git a/src/ffts_real.c b/src/ffts_real.c index f6e6127..6650d07 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -110,7 +110,7 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) #endif #ifdef __ARM_NEON__ - for (i = 0; i < N/2; i += 2) { + for (i = 0; i < N; i += 4) { __asm__ __volatile__ ( "vld1.32 {q8}, [%[pa]]!\n\t" "vld1.32 {q9}, [%[pb]]!\n\t" @@ -146,9 +146,8 @@ ffts_execute_1d_real(ffts_plan_t *p, const void *input, void *output) "vadd.f32 q13, q13, q15\n\t" "vadd.f32 q12, q12, q13\n\t" "vst1.32 {q12}, [%[pout]]!\n\t" - : [pa] "+r" (A), [pb] "+r" (B), [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), - [pout] "+r" (p_out) - : + : [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), [pout] "+r" (p_out) + : [pa] "r" (A), [pb] "r" (B) : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } @@ -403,9 +402,8 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) "vsub.f32 q13, q13, q15\n\t" "vadd.f32 q12, q12, q13\n\t" "vst1.32 {q12}, [%[pout]]!\n\t" - : [pa] "+r" (A), [pb] "+r" (B), [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), - [pout] "+r" (p_out) - : + : [buf0] "+r" (p_buf0), [buf1] "+r" (p_buf1), [pout] "+r" (p_out) + : [pa] "r" (A), [pb] "r" (B) : "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -- cgit v1.1 From e1a92c370e5bd57a29f4ad66c72bae1078275f62 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 30 Jul 2015 12:06:13 +0300 Subject: Detect presence of malloc.h, fixes anthonix/ffts#40 --- CMakeLists.txt | 5 +++++ src/ffts_internal.h | 3 +++ 2 files changed, 8 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5b85fb1..63e636f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,12 +39,17 @@ include(CMakePushCheckState) add_definitions(-DFFTS_CMAKE_GENERATED) # check existence of various headers +check_include_file(malloc.h HAVE_MALLOC_H) check_include_file(stdint.h HAVE_STDINT_H) check_include_file(stdlib.h HAVE_STDLIB_H) check_include_file(string.h HAVE_STRING_H) check_include_file(sys/mman.h HAVE_SYS_MMAN_H) check_include_file(unistd.h 
HAVE_UNISTD_H) +if(HAVE_MALLOC_H) + add_definitions(-DHAVE_MALLOC_H) +endif(HAVE_MALLOC_H) + if(HAVE_STDINT_H) add_definitions(-DHAVE_STDINT_H) endif(HAVE_STDINT_H) diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 14d037d..30e814b 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -38,7 +38,10 @@ #include "ffts_attributes.h" #include "types.h" +#ifdef HAVE_MALLOC_H #include +#endif + #include #ifdef HAVE_STDINT_H -- cgit v1.1 From cdf9015f3f150b56fde42015868c3eeb65aaf486 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 30 Jul 2015 12:15:15 +0300 Subject: Control reaches end of non-void function --- src/ffts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ffts.c b/src/ffts.c index 6e12563..a04a92c 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -119,8 +119,8 @@ static FFTS_INLINE int ffts_flush_instruction_cache(void *start, size_t length) #elif __GNUC__ __clear_cache((long) start, (long) start + length); #endif - return 0; #endif + return 0; #endif } -- cgit v1.1 From 5f0db6e851fc5d0b3db83b140e81cd7b0d4733f0 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 28 Aug 2015 11:55:01 +0300 Subject: No need to display the size of transform --- src/codegen.c | 34 +++++++++++++++++----------------- src/codegen_sse.h | 28 ++++++++++++++++++---------- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/src/codegen.c b/src/codegen.c index fc407cb..c4e19e6 100644 --- a/src/codegen.c +++ b/src/codegen.c @@ -9,14 +9,14 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the organization nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -199,7 +199,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N generate_leaf_init(&fp, loop_count); if (ffts_ctzl(N) & 1) { - generate_leaf_ee(&fp, offsets, p->i1 ? 6 : 0); + generate_leaf_ee(&fp, offsets, p->i1 ? 6 : 0); if (p->i1) { loop_count += 4 * p->i1; @@ -209,14 +209,14 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N loop_count += 4; generate_leaf_oe(&fp, offsets_o); } else { - generate_leaf_ee(&fp, offsets, N >= 256 ? 2 : 8); + generate_leaf_ee(&fp, offsets, N >= 256 ? 
2 : 8); loop_count += 4; generate_leaf_eo(&fp, offsets); if (p->i1) { loop_count += 4 * p->i1; - generate_leaf_oo(&fp, loop_count, offsets_o, N >= 256 ? 4 : 7); + generate_leaf_oo(&fp, loop_count, offsets_o, N >= 256 ? 4 : 7); } } @@ -309,7 +309,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N #ifdef __arm__ #ifdef HAVE_NEON - if(__builtin_ctzl(N) & 1) { + if (ffts_ctzl(N) & 1) { ADDI(&fp, 2, 7, 0); ADDI(&fp, 7, 9, 0); ADDI(&fp, 9, 2, 0); @@ -535,9 +535,9 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N if(pps[0] - pN) ADDI(&fp, 1, 1, pps[0] - pN); } - if(p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8 - pLUT) - ADDI(&fp, 2, 2, p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8 - pLUT); - + if (p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8 - pLUT) { + ADDI(&fp, 2, 2, p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8 - pLUT); + } if(pps[0] == 2 * leaf_N) { *fp = BL(fp+2, x_4_addr); @@ -574,7 +574,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N pAddr = pps[1] * 4; pN = pps[0]; - pLUT = p->ws_is[__builtin_ctzl(pps[0]/leaf_N)-1]*8;//LUT_offset(pps[0], leafN); + pLUT = p->ws_is[ffts_ctzl(pps[0]/leaf_N)-1]*8;//LUT_offset(pps[0], leafN); // fprintf(stderr, "LUT offset for %d is %d\n", pN, pLUT); count += 4; pps += 2; @@ -594,7 +594,7 @@ transform_func_t ffts_generate_func_code(ffts_plan_t *p, size_t N, size_t leaf_N //fprintf(stderr, "\n"); //for(int i=0;i Date: Fri, 28 Aug 2015 17:03:07 +0300 Subject: Avoid problems with different versions of CMakePushCheckState macros and fix float-abi test --- CMakeLists.txt | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 63e636f..c3c703d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,6 @@ option(ENABLE_SHARED include(CheckCSourceCompiles) include(CheckIncludeFile) include(CheckSymbolExists) -include(CMakePushCheckState) add_definitions(-DFFTS_CMAKE_GENERATED) @@ -76,49 +75,43 @@ if(NOT CMAKE_CROSSCOMPILING) # Determinate what floating-point hardware # (or hardware emulation) is available # - cmake_push_check_state() + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) # Try to execute quietly without messages set(CMAKE_REQUIRED_QUIET 1) # Test compilation with -mfpu=neon - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfpu=neon") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon") check_symbol_exists(exit stdlib.h NEON_AVAILABLE) if(NOT NEON_AVAILABLE) - cmake_reset_check_state() - # Test compilation with -mfpu=vfp - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfpu=vfp") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=vfp") check_symbol_exists(exit stdlib.h VFP_AVAILABLE) if(NOT VFP_AVAILABLE) message(WARNING "FFTS is using 'soft' FPU") else() message("FFTS is using 'vfp' FPU") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") - set(ENABLE_NEON 0) set(ENABLE_VFP 1) endif(NOT SOFTFP_AVAILABLE) else() message("FFTS is using 'neon' FPU") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") - set(ENABLE_NEON 1) set(ENABLE_VFP 0) endif(NOT NEON_AVAILABLE) # Determinate float ABI if NEON or VFP is used if(NEON_AVAILABLE OR VFP_AVAILABLE) - cmake_push_check_state() + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) # Test compilation with -mfloat-abi=hard - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=hardfp") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=hard") check_symbol_exists(exit stdlib.h 
HARDFP_AVAILABLE) if(NOT HARDFP_AVAILABLE) - cmake_reset_check_state() - - # Test compilation with -mfloat-abi=hard - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=softfp") + # Test compilation with -mfloat-abi=softfp + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=softfp") check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE) if(NOT SOFTFP_AVAILABLE) # Most likely development libraries are missing @@ -131,11 +124,7 @@ if(NOT CMAKE_CROSSCOMPILING) message(WARNING "FFTS is using 'hard' float ABI") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard") endif(NOT HARDFP_AVAILABLE) - - cmake_pop_check_state() endif(NEON_AVAILABLE OR VFP_AVAILABLE) - - cmake_pop_check_state() else() # check if the platform has support for SSE intrinsics check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) -- cgit v1.1 From 1febfc29c497be74b4cdc3b404a1afdab0e9f9e7 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 29 Aug 2015 17:48:27 +0300 Subject: Improve (fix) ARM architecture detection Cross-compiling still needs work --- CMakeLists.txt | 138 ++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 107 insertions(+), 31 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c3c703d..13474dd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,8 +32,8 @@ option(ENABLE_SHARED ) include(CheckCSourceCompiles) +include(CheckCSourceRuns) include(CheckIncludeFile) -include(CheckSymbolExists) add_definitions(-DFFTS_CMAKE_GENERATED) @@ -72,72 +72,148 @@ endif(HAVE_UNISTD_H) # Determinate if we are cross-compiling if(NOT CMAKE_CROSSCOMPILING) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") - # Determinate what floating-point hardware - # (or hardware emulation) is available - # + # Determinate ARM architecture set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) # Try to execute quietly without messages set(CMAKE_REQUIRED_QUIET 1) - # Test compilation with -mfpu=neon - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon") - check_symbol_exists(exit stdlib.h NEON_AVAILABLE) - if(NOT NEON_AVAILABLE) - # Test compilation with -mfpu=vfp + # The test for ARM architecture + set(TEST_SOURCE_CODE "int main() { return 0; }") + + # GCC documentation says "native" is only supported on Linux, but let's try + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=native") + check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_NATIVE_FLAG_SUPPORTED) + + if(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED) + # Fallback trying generic ARMv7 + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv7") + check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV7_FLAG_SUPPORTED) + + if(NOT GCC_MARCH_ARMV7_FLAG_SUPPORTED) + # Fallback trying generic ARMv6 + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv6") + check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV6_FLAG_SUPPORTED) + + if(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED) + message(WARNING "FFTS failed to determinate ARM architecture") + set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) + else() + message("FFTS is build using 'march=armv6'") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv6") + endif(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED) + else() + message("FFTS is build using 'march=armv7'") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7") + endif(NOT GCC_MARCH_ARMV7_FLAG_SUPPORTED) + else() + message("FFTS is build using 'march=native'") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native") + endif(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED) + + # Determinate what floating-point hardware 
(or hardware emulation) is available + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + + # The test for ARM NEON support + set(TEST_SOURCE_CODE " + #include + int main() + { + float32x4_t v; + float zeros[4] = {0.0f, 0.0f, 0.0f, 0.0f}; + v = vld1q_f32(zeros); + return 0; + }" + ) + + # Test running with -mfpu=neon + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=softfp") + check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SUPPORTED) + + if(NOT NEON_SUPPORTED) + # Fallback using VFP if NEON is not supported + if(ENABLE_NEON) + message(FATAL_ERROR "FFTS cannot enable NEON on this platform") + endif(ENABLE_NEON) + + # Test for ARM VFP support + set(TEST_SOURCE_CODE " + double sum(double a, double b) + { + return a + b; + } + int main() + { + double s1, s2, v1 = 1.0, v2 = 2.0, v3 = 1.0e-322; + s1 = sum(v1, v2); + s2 = sum(v3, v3); + return 0; + }" + ) + + # Test running with -mfpu=vfp set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=vfp") - check_symbol_exists(exit stdlib.h VFP_AVAILABLE) - if(NOT VFP_AVAILABLE) + check_c_source_runs("${TEST_SOURCE_CODE}" VFP_SUPPORTED) + + if(NOT VFP_SUPPORTED) + # Fallback using emulation if VFP is not supported + if(ENABLE_VFP) + message(FATAL_ERROR "FFTS cannot enable VFP on this platform") + endif(ENABLE_VFP) + message(WARNING "FFTS is using 'soft' FPU") else() message("FFTS is using 'vfp' FPU") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") - set(ENABLE_NEON 0) - set(ENABLE_VFP 1) - endif(NOT SOFTFP_AVAILABLE) + set(ENABLE_VFP ON) + endif(NOT VFP_SUPPORTED) else() message("FFTS is using 'neon' FPU") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") - set(ENABLE_NEON 1) - set(ENABLE_VFP 0) - endif(NOT NEON_AVAILABLE) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon") + set(ENABLE_NEON ON) + endif(NOT NEON_SUPPORTED) # Determinate float ABI if NEON or VFP is used - if(NEON_AVAILABLE OR VFP_AVAILABLE) + if(NEON_SUPPORTED OR VFP_SUPPORTED) set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) - # Test compilation with -mfloat-abi=hard + # Test running with -mfloat-abi=hard set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=hard") - check_symbol_exists(exit stdlib.h HARDFP_AVAILABLE) - if(NOT HARDFP_AVAILABLE) - # Test compilation with -mfloat-abi=softfp + + # Use the same test as before + check_c_source_runs("${TEST_SOURCE_CODE}" HARDFP_SUPPORTED) + + if(NOT HARDFP_SUPPORTED) + # Test running with -mfloat-abi=softfp set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=softfp") - check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE) - if(NOT SOFTFP_AVAILABLE) + check_c_source_runs("${TEST_SOURCE_CODE}" SOFTFP_SUPPORTED) + + if(NOT SOFTFP_SUPPORTED) # Most likely development libraries are missing message(WARNING "FFTS is using 'soft' float ABI") else() message("FFTS is using 'softfp' float ABI") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp") - endif(NOT SOFTFP_AVAILABLE) + endif(NOT SOFTFP_SUPPORTED) else() - message(WARNING "FFTS is using 'hard' float ABI") + message("FFTS is using 'hard' float ABI") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard") - endif(NOT HARDFP_AVAILABLE) - endif(NEON_AVAILABLE OR VFP_AVAILABLE) + endif(NOT HARDFP_SUPPORTED) + endif(NEON_SUPPORTED OR VFP_SUPPORTED) else() # check if the platform has support for SSE intrinsics check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) if(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) endif(HAVE_XMMINTRIN_H) - + # check if the platform has support for SSE2 intrinsics 
check_include_file(emmintrin.h HAVE_EMMINTRIN_H) if(HAVE_EMMINTRIN_H) add_definitions(-DHAVE_SSE2) endif(HAVE_EMMINTRIN_H) - + # check if the platform has support for SSE3 intrinsics check_include_file(pmmintrin.h HAVE_PMMINTRIN_H) if(HAVE_PMMINTRIN_H) @@ -165,7 +241,7 @@ if(NOT CMAKE_CROSSCOMPILING) endif(HAVE_PMMINTRIN_H) endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") else() - # Check if we can always use detection code above? + # TODO: Add detections for compiler support and headers endif(NOT CMAKE_CROSSCOMPILING) # compiler settings -- cgit v1.1 From 51c448d4c0cb3655713ca894775fb5f219f8b7c1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Sat, 29 Aug 2015 19:50:56 +0300 Subject: Correct flag is 'march=armv7-a' and apply the same flags for ASM --- CMakeLists.txt | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 13474dd..593f418 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -87,10 +87,10 @@ if(NOT CMAKE_CROSSCOMPILING) if(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED) # Fallback trying generic ARMv7 - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv7") - check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV7_FLAG_SUPPORTED) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv7-a") + check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV7A_FLAG_SUPPORTED) - if(NOT GCC_MARCH_ARMV7_FLAG_SUPPORTED) + if(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED) # Fallback trying generic ARMv6 set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv6") check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV6_FLAG_SUPPORTED) @@ -100,14 +100,17 @@ if(NOT CMAKE_CROSSCOMPILING) set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) else() message("FFTS is build using 'march=armv6'") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv6") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv6") endif(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED) else() - message("FFTS is build using 'march=armv7'") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7") - endif(NOT GCC_MARCH_ARMV7_FLAG_SUPPORTED) + message("FFTS is build using 'march=armv7-a'") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv7-a") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a") + endif(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED) else() message("FFTS is build using 'march=native'") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=native") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native") endif(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED) @@ -164,11 +167,13 @@ if(NOT CMAKE_CROSSCOMPILING) message(WARNING "FFTS is using 'soft' FPU") else() message("FFTS is using 'vfp' FPU") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=vfp") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") set(ENABLE_VFP ON) endif(NOT VFP_SUPPORTED) else() message("FFTS is using 'neon' FPU") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon") set(ENABLE_NEON ON) @@ -194,10 +199,12 @@ if(NOT CMAKE_CROSSCOMPILING) message(WARNING "FFTS is using 'soft' float ABI") else() message("FFTS is using 'softfp' float ABI") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=softfp") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp") endif(NOT SOFTFP_SUPPORTED) else() message("FFTS is using 'hard' float ABI") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=hard") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard") endif(NOT HARDFP_SUPPORTED) endif(NEON_SUPPORTED OR 
VFP_SUPPORTED) -- cgit v1.1 From 896904f94299a3feb97271cfecec69834c59646f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 15 Sep 2015 17:53:19 +0300 Subject: Extended constant tables to double-double arithmetic --- src/ffts_trig.c | 164 +++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 115 insertions(+), 49 deletions(-) diff --git a/src/ffts_trig.c b/src/ffts_trig.c index 514a1e5..883d0c5 100644 --- a/src/ffts_trig.c +++ b/src/ffts_trig.c @@ -33,44 +33,110 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ffts_trig.h" /* 1/(2*cos(pow(2,-p)*pi)) */ -static const FFTS_ALIGN(16) unsigned int half_secant[66] = { - 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, - 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, 0x00000000, 0x3fe00000, - 0x00000001, 0x3fe00000, 0x00000005, 0x3fe00000, 0x00000014, 0x3fe00000, - 0x0000004f, 0x3fe00000, 0x0000013c, 0x3fe00000, 0x000004ef, 0x3fe00000, - 0x000013bd, 0x3fe00000, 0x00004ef5, 0x3fe00000, 0x00013bd4, 0x3fe00000, - 0x0004ef4f, 0x3fe00000, 0x0013bd3d, 0x3fe00000, 0x004ef4f3, 0x3fe00000, - 0x013bd3cd, 0x3fe00000, 0x04ef4f34, 0x3fe00000, 0x13bd3cde, 0x3fe00000, - 0x4ef4f46c, 0x3fe00000, 0x3bd3e0e7, 0x3fe00001, 0xef507722, 0x3fe00004, - 0xbd5114f9, 0x3fe00013, 0xf637de7d, 0x3fe0004e, 0xe8190891, 0x3fe0013b, - 0x9436640e, 0x3fe004f0, 0x9c61d971, 0x3fe013d1, 0xd17cba53, 0x3fe0503e, - 0x7bdb3895, 0x3fe1517a, 0x00000000, 0x00000000, 0x00000000, 0x00000000 +static const FFTS_ALIGN(16) unsigned int half_secant[132] = { + 0x00000000, 0x3fe00000, 0xc9be45de, 0x3be3bd3c, + 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c03bd3c, + 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c23bd3c, + 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c43bd3c, + 0x00000000, 0x3fe00000, 0xc9be45de, 0x3c63bd3c, + 0x00000000, 0x3fe00000, 0xc9be45df, 0x3c83bd3c, + 0x00000001, 0x3fe00000, 0x4df22efd, 0x3c7de9e6, + 0x00000005, 0x3fe00000, 0x906e8725, 0xbc60b0cd, + 0x00000014, 0x3fe00000, 0x906e8357, 0xbc80b0cd, + 0x0000004f, 0x3fe00000, 0x0dce83c9, 0xbc5619b2, + 0x0000013c, 0x3fe00000, 0x0dc6e79a, 0xbc7619b2, + 0x000004ef, 0x3fe00000, 0xe4af1240, 0x3c83cc9b, + 0x000013bd, 0x3fe00000, 0x2d14c08a, 0x3c7e64df, + 0x00004ef5, 0x3fe00000, 0x47a85465, 0xbc59b20b, + 0x00013bd4, 0x3fe00000, 0xab79c897, 0xbc79b203, + 0x0004ef4f, 0x3fe00000, 0x15019a96, 0x3c79386b, + 0x0013bd3d, 0x3fe00000, 0x7d6dbf4b, 0xbc7b16b7, + 0x004ef4f3, 0x3fe00000, 0xf30832e0, 0x3c741ee4, + 0x013bd3cd, 0x3fe00000, 0xd3bcd4bb, 0xbc83f41e, + 0x04ef4f34, 0x3fe00000, 0xdd75aebb, 0xbc82ef06, + 0x13bd3cde, 0x3fe00000, 0xb2b41b3d, 0x3c52d979, + 0x4ef4f46c, 0x3fe00000, 0x4f0fb458, 0xbc851db3, + 0x3bd3e0e7, 0x3fe00001, 0x8a0ce3f0, 0x3c58dbab, + 0xef507722, 0x3fe00004, 0x2a8ec295, 0x3c83e351, + 0xbd5114f9, 0x3fe00013, 0xc4c0d92d, 0x3c8b3ca4, + 0xf637de7d, 0x3fe0004e, 0xb74de729, 0x3c45974e, + 0xe8190891, 0x3fe0013b, 0x26edf4da, 0xbc814c20, + 0x9436640e, 0x3fe004f0, 0xe2b34b50, 0x3c8091ab, + 0x9c61d971, 0x3fe013d1, 0x6ce01b8e, 0x3c7f7df7, + 0xd17cba53, 0x3fe0503e, 0x74ad7633, 0xbc697609, + 0x7bdb3895, 0x3fe1517a, 0x82f9091b, 0xbc8008d1, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; /* cos(pow(2,-p)*pi), sin(pow(2,-p)*pi) */ -static const FFTS_ALIGN(16) unsigned int cos_sin_pi_table[132] = { - 0x00000000, 0x3ff00000, 0x54442d18, 0x3e0921fb, 0x00000000, 0x3ff00000, - 0x54442d18, 0x3e0921fb, 0x00000000, 0x3ff00000, 0x54442d18, 0x3e1921fb, - 0x00000000, 0x3ff00000, 0x54442d18, 0x3e2921fb, 0x00000000, 0x3ff00000, - 0x54442d18, 
0x3e3921fb, 0xffffffff, 0x3fefffff, 0x54442d18, 0x3e4921fb, - 0xfffffffe, 0x3fefffff, 0x54442d18, 0x3e5921fb, 0xfffffff6, 0x3fefffff, - 0x54442d16, 0x3e6921fb, 0xffffffd9, 0x3fefffff, 0x54442d0e, 0x3e7921fb, - 0xffffff62, 0x3fefffff, 0x54442cef, 0x3e8921fb, 0xfffffd88, 0x3fefffff, - 0x54442c73, 0x3e9921fb, 0xfffff621, 0x3fefffff, 0x54442a83, 0x3ea921fb, - 0xffffd886, 0x3fefffff, 0x544422c2, 0x3eb921fb, 0xffff6216, 0x3fefffff, - 0x544403c1, 0x3ec921fb, 0xfffd8858, 0x3fefffff, 0x544387ba, 0x3ed921fb, - 0xfff62162, 0x3fefffff, 0x544197a1, 0x3ee921fb, 0xffd88586, 0x3fefffff, - 0x5439d73a, 0x3ef921fb, 0xff62161a, 0x3fefffff, 0x541ad59e, 0x3f0921fb, - 0xfd885867, 0x3fefffff, 0x539ecf31, 0x3f1921fb, 0xf621619c, 0x3fefffff, - 0x51aeb57c, 0x3f2921fb, 0xd8858675, 0x3fefffff, 0x49ee4ea6, 0x3f3921fb, - 0x62161a34, 0x3fefffff, 0x2aecb360, 0x3f4921fb, 0x88586ee6, 0x3feffffd, - 0xaee6472e, 0x3f5921fa, 0x21621d02, 0x3feffff6, 0xbecca4ba, 0x3f6921f8, - 0x858e8a92, 0x3fefffd8, 0xfe670071, 0x3f7921f0, 0x169b92db, 0x3fefff62, - 0xfcdec784, 0x3f8921d1, 0x6084cd0d, 0x3feffd88, 0xf7a3667e, 0x3f992155, - 0xe3796d7e, 0x3feff621, 0xf10dd814, 0x3fa91f65, 0xa3d12526, 0x3fefd88d, - 0xbc29b42c, 0x3fb917a6, 0xcff75cb0, 0x3fef6297, 0x3c69a60b, 0x3fc8f8b8, - 0xcf328d46, 0x3fed906b, 0xa6aea963, 0x3fd87de2, 0x667f3bcd, 0x3fe6a09e, - 0x667f3bcd, 0x3fe6a09e, 0x00000000, 0x00000000, 0x00000000, 0x3ff00000 +static const FFTS_ALIGN(16) unsigned int cos_sin_pi_table[264] = { + 0x00000000, 0x3ff00000, 0xc9be45de, 0xbbf3bd3c, + 0x54442d18, 0x3df921fb, 0xbb77974f, 0x3a91a390, + 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc13bd3c, + 0x54442d18, 0x3e0921fb, 0x54a14928, 0x3aa19bd0, + 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc33bd3c, + 0x54442d18, 0x3e1921fb, 0xb948108a, 0x3ab17cce, + 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc53bd3c, + 0x54442d18, 0x3e2921fb, 0x4be32e14, 0x3ac100c8, + 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc73bd3c, + 0x54442d18, 0x3e3921fb, 0x2c9f4879, 0x3ace215d, + 0xffffffff, 0x3fefffff, 0x6c837443, 0x3c888586, + 0x54442d18, 0x3e4921fb, 0x0005f376, 0x3acd411f, + 0xfffffffe, 0x3fefffff, 0x4df22ef1, 0xbc8de9e6, + 0x54442d18, 0x3e5921fb, 0x9937209e, 0xbaf7b153, + 0xfffffff6, 0x3fefffff, 0x906e88aa, 0x3c70b0cd, + 0x54442d16, 0x3e6921fb, 0xfe19968a, 0xbb03b7c0, + 0xffffffd9, 0x3fefffff, 0xdf22ed26, 0xbc8e9e64, + 0x54442d0e, 0x3e7921fb, 0x8d1b6ffb, 0xbaee8bb4, + 0xffffff62, 0x3fefffff, 0x0dd18f0f, 0x3c6619b2, + 0x54442cef, 0x3e8921fb, 0x7f2b20fb, 0xbb00e133, + 0xfffffd88, 0x3fefffff, 0x0dd314b2, 0x3c8619b2, + 0x54442c73, 0x3e9921fb, 0x619fdf6e, 0xbb174e98, + 0xfffff621, 0x3fefffff, 0x3764acf5, 0x3c8866c8, + 0x54442a83, 0x3ea921fb, 0xf5b2407f, 0xbb388215, + 0xffffd886, 0x3fefffff, 0x20e7a944, 0xbc8e64df, + 0x544422c2, 0x3eb921fb, 0x7b9b9f23, 0x3b5a0961, + 0xffff6216, 0x3fefffff, 0x52ee25ea, 0x3c69b20e, + 0x544403c1, 0x3ec921fb, 0x4df6a86a, 0xbb5999d9, + 0xfffd8858, 0x3fefffff, 0xd8910ead, 0x3c89b20f, + 0x544387ba, 0x3ed921fb, 0x0809d04d, 0x3b77d9db, + 0xfff62162, 0x3fefffff, 0x438d3925, 0xbc8937a8, + 0x544197a1, 0x3ee921fb, 0xa5d27f7a, 0xbb858b02, + 0xffd88586, 0x3fefffff, 0x94b3ddd2, 0x3c8b22e4, + 0x5439d73a, 0x3ef921fb, 0xf8a3b73d, 0xbb863c7f, + 0xff62161a, 0x3fefffff, 0x7ea469b2, 0xbc835c13, + 0x541ad59e, 0x3f0921fb, 0xb8cee262, 0x3bae9860, + 0xfd885867, 0x3fefffff, 0x23a32e63, 0xbc77d556, + 0x539ecf31, 0x3f1921fb, 0xfcd23a30, 0x3b96b111, + 0xf621619c, 0x3fefffff, 0xbbbd8fe6, 0xbc87507d, + 0x51aeb57c, 0x3f2921fb, 0x4916c435, 0xbbca6e1d, + 0xd8858675, 0x3fefffff, 0x54748eab, 0xbc879f0e, + 0x49ee4ea6, 0x3f3921fb, 0x744a453e, 
0x3bde894d, + 0x62161a34, 0x3fefffff, 0xb1f9b9c4, 0xbc6136dc, + 0x2aecb360, 0x3f4921fb, 0x7e566b4c, 0x3be87615, + 0x88586ee6, 0x3feffffd, 0xf173ae5b, 0x3c81af64, + 0xaee6472e, 0x3f5921fa, 0x284a9df8, 0xbbfee52e, + 0x21621d02, 0x3feffff6, 0xebc82813, 0xbc76acfc, + 0xbecca4ba, 0x3f6921f8, 0x7bcab5b2, 0x3c02ba40, + 0x858e8a92, 0x3fefffd8, 0x1883bcf7, 0x3c8359c7, + 0xfe670071, 0x3f7921f0, 0xfe6b7a9b, 0x3bfab967, + 0x169b92db, 0x3fefff62, 0xc81fbd0d, 0x3c85dda3, + 0xfcdec784, 0x3f8921d1, 0xbe836d9d, 0x3c29878e, + 0x6084cd0d, 0x3feffd88, 0x4556e4cb, 0xbc81354d, + 0xf7a3667e, 0x3f992155, 0x091a0130, 0xbbfb1d63, + 0xe3796d7e, 0x3feff621, 0x2e24aa15, 0xbc6c57bc, + 0xf10dd814, 0x3fa91f65, 0x0d569a90, 0xbc2912bd, + 0xa3d12526, 0x3fefd88d, 0x378811c7, 0xbc887df6, + 0xbc29b42c, 0x3fb917a6, 0xd26ed688, 0xbc3e2718, + 0xcff75cb0, 0x3fef6297, 0x2a361fd3, 0x3c756217, + 0x3c69a60b, 0x3fc8f8b8, 0xb9ff8d82, 0xbc626d19, + 0xcf328d46, 0x3fed906b, 0x10231ac2, 0x3c7457e6, + 0xa6aea963, 0x3fd87de2, 0xd3d5a610, 0xbc672ced, + 0x667f3bcd, 0x3fe6a09e, 0x13b26456, 0xbc8bdd34, + 0x667f3bcd, 0x3fe6a09e, 0x13b26456, 0xbc8bdd34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x3ff00000, 0x00000000, 0x00000000 }; int @@ -187,13 +253,13 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) FFTS_ASSUME(log_2 > 1); offset = 32 - log_2; ct = (const ffts_cpx_64f*) - FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[4 * offset]); - hs = (const double*) &half_secant[2 * offset]; + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); + hs = (const double*) &half_secant[4 * offset]; /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { - w[i][0] = ct[i][0]; - w[i][1] = ct[i][1]; + w[i][0] = ct[2*i][0]; + w[i][1] = ct[2*i][2]; } /* generate sine and cosine tables with maximum error less than 0.5 ULP */ @@ -208,8 +274,8 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) /* skip and find next trailing zero */ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); - w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); - w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]); } mid_point: @@ -264,13 +330,13 @@ ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, FFTS_ASSUME(log_2 > 2); offset = 34 - log_2; ct = (const ffts_cpx_64f*) - FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[4 * offset]); - hs = (const double*) &half_secant[2 * offset]; + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); + hs = (const double*) &half_secant[4 * offset]; /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { - w[i][0] = ct[i][0]; - w[i][1] = ct[i][1]; + w[i][0] = ct[2*i][0]; + w[i][1] = ct[2*i][2]; } /* generate sine and cosine tables with maximum error less than 0.5 ULP */ @@ -297,8 +363,8 @@ ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, /* skip and find next trailing zero */ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); - w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); - w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]); } } else { for (i = 1; i < N/4; i++) { @@ -323,8 +389,8 @@ ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, /* skip and find next trailing zero */ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); - 
w[log_2][0] = hs[log_2] * (w[log_2 + 1][0] + w[offset][0]); - w[log_2][1] = hs[log_2] * (w[log_2 + 1][1] + w[offset][1]); + w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]); } } -- cgit v1.1 From 7ae74d3d18c9113fffd6530c891add60046c7ee1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 16 Sep 2015 14:14:27 +0300 Subject: Change the order of constants; cos_hi, cos_lo, sin_hi, sin_lo -> cos_hi, sin_hi, cos_lo, sin_lo to support 128 bit vectorization --- src/ffts_trig.c | 136 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/src/ffts_trig.c b/src/ffts_trig.c index 883d0c5..624d2c3 100644 --- a/src/ffts_trig.c +++ b/src/ffts_trig.c @@ -71,72 +71,72 @@ static const FFTS_ALIGN(16) unsigned int half_secant[132] = { /* cos(pow(2,-p)*pi), sin(pow(2,-p)*pi) */ static const FFTS_ALIGN(16) unsigned int cos_sin_pi_table[264] = { - 0x00000000, 0x3ff00000, 0xc9be45de, 0xbbf3bd3c, - 0x54442d18, 0x3df921fb, 0xbb77974f, 0x3a91a390, - 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc13bd3c, - 0x54442d18, 0x3e0921fb, 0x54a14928, 0x3aa19bd0, - 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc33bd3c, - 0x54442d18, 0x3e1921fb, 0xb948108a, 0x3ab17cce, - 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc53bd3c, - 0x54442d18, 0x3e2921fb, 0x4be32e14, 0x3ac100c8, - 0x00000000, 0x3ff00000, 0xc9be45de, 0xbc73bd3c, - 0x54442d18, 0x3e3921fb, 0x2c9f4879, 0x3ace215d, - 0xffffffff, 0x3fefffff, 0x6c837443, 0x3c888586, - 0x54442d18, 0x3e4921fb, 0x0005f376, 0x3acd411f, - 0xfffffffe, 0x3fefffff, 0x4df22ef1, 0xbc8de9e6, - 0x54442d18, 0x3e5921fb, 0x9937209e, 0xbaf7b153, - 0xfffffff6, 0x3fefffff, 0x906e88aa, 0x3c70b0cd, - 0x54442d16, 0x3e6921fb, 0xfe19968a, 0xbb03b7c0, - 0xffffffd9, 0x3fefffff, 0xdf22ed26, 0xbc8e9e64, - 0x54442d0e, 0x3e7921fb, 0x8d1b6ffb, 0xbaee8bb4, - 0xffffff62, 0x3fefffff, 0x0dd18f0f, 0x3c6619b2, - 0x54442cef, 0x3e8921fb, 0x7f2b20fb, 0xbb00e133, - 0xfffffd88, 0x3fefffff, 0x0dd314b2, 0x3c8619b2, - 0x54442c73, 0x3e9921fb, 0x619fdf6e, 0xbb174e98, - 0xfffff621, 0x3fefffff, 0x3764acf5, 0x3c8866c8, - 0x54442a83, 0x3ea921fb, 0xf5b2407f, 0xbb388215, - 0xffffd886, 0x3fefffff, 0x20e7a944, 0xbc8e64df, - 0x544422c2, 0x3eb921fb, 0x7b9b9f23, 0x3b5a0961, - 0xffff6216, 0x3fefffff, 0x52ee25ea, 0x3c69b20e, - 0x544403c1, 0x3ec921fb, 0x4df6a86a, 0xbb5999d9, - 0xfffd8858, 0x3fefffff, 0xd8910ead, 0x3c89b20f, - 0x544387ba, 0x3ed921fb, 0x0809d04d, 0x3b77d9db, - 0xfff62162, 0x3fefffff, 0x438d3925, 0xbc8937a8, - 0x544197a1, 0x3ee921fb, 0xa5d27f7a, 0xbb858b02, - 0xffd88586, 0x3fefffff, 0x94b3ddd2, 0x3c8b22e4, - 0x5439d73a, 0x3ef921fb, 0xf8a3b73d, 0xbb863c7f, - 0xff62161a, 0x3fefffff, 0x7ea469b2, 0xbc835c13, - 0x541ad59e, 0x3f0921fb, 0xb8cee262, 0x3bae9860, - 0xfd885867, 0x3fefffff, 0x23a32e63, 0xbc77d556, - 0x539ecf31, 0x3f1921fb, 0xfcd23a30, 0x3b96b111, - 0xf621619c, 0x3fefffff, 0xbbbd8fe6, 0xbc87507d, - 0x51aeb57c, 0x3f2921fb, 0x4916c435, 0xbbca6e1d, - 0xd8858675, 0x3fefffff, 0x54748eab, 0xbc879f0e, - 0x49ee4ea6, 0x3f3921fb, 0x744a453e, 0x3bde894d, - 0x62161a34, 0x3fefffff, 0xb1f9b9c4, 0xbc6136dc, - 0x2aecb360, 0x3f4921fb, 0x7e566b4c, 0x3be87615, - 0x88586ee6, 0x3feffffd, 0xf173ae5b, 0x3c81af64, - 0xaee6472e, 0x3f5921fa, 0x284a9df8, 0xbbfee52e, - 0x21621d02, 0x3feffff6, 0xebc82813, 0xbc76acfc, - 0xbecca4ba, 0x3f6921f8, 0x7bcab5b2, 0x3c02ba40, - 0x858e8a92, 0x3fefffd8, 0x1883bcf7, 0x3c8359c7, - 0xfe670071, 0x3f7921f0, 0xfe6b7a9b, 0x3bfab967, - 0x169b92db, 0x3fefff62, 0xc81fbd0d, 0x3c85dda3, - 0xfcdec784, 
0x3f8921d1, 0xbe836d9d, 0x3c29878e, - 0x6084cd0d, 0x3feffd88, 0x4556e4cb, 0xbc81354d, - 0xf7a3667e, 0x3f992155, 0x091a0130, 0xbbfb1d63, - 0xe3796d7e, 0x3feff621, 0x2e24aa15, 0xbc6c57bc, - 0xf10dd814, 0x3fa91f65, 0x0d569a90, 0xbc2912bd, - 0xa3d12526, 0x3fefd88d, 0x378811c7, 0xbc887df6, - 0xbc29b42c, 0x3fb917a6, 0xd26ed688, 0xbc3e2718, - 0xcff75cb0, 0x3fef6297, 0x2a361fd3, 0x3c756217, - 0x3c69a60b, 0x3fc8f8b8, 0xb9ff8d82, 0xbc626d19, - 0xcf328d46, 0x3fed906b, 0x10231ac2, 0x3c7457e6, - 0xa6aea963, 0x3fd87de2, 0xd3d5a610, 0xbc672ced, - 0x667f3bcd, 0x3fe6a09e, 0x13b26456, 0xbc8bdd34, - 0x667f3bcd, 0x3fe6a09e, 0x13b26456, 0xbc8bdd34, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x3ff00000, 0x00000000, 0x00000000 + 0x00000000, 0x3ff00000, 0x54442d18, 0x3df921fb, + 0xc9be45de, 0xbbf3bd3c, 0xbb77974f, 0x3a91a390, + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e0921fb, + 0xc9be45de, 0xbc13bd3c, 0x54a14928, 0x3aa19bd0, + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e1921fb, + 0xc9be45de, 0xbc33bd3c, 0xb948108a, 0x3ab17cce, + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e2921fb, + 0xc9be45de, 0xbc53bd3c, 0x4be32e14, 0x3ac100c8, + 0x00000000, 0x3ff00000, 0x54442d18, 0x3e3921fb, + 0xc9be45de, 0xbc73bd3c, 0x2c9f4879, 0x3ace215d, + 0xffffffff, 0x3fefffff, 0x54442d18, 0x3e4921fb, + 0x6c837443, 0x3c888586, 0x0005f376, 0x3acd411f, + 0xfffffffe, 0x3fefffff, 0x54442d18, 0x3e5921fb, + 0x4df22ef1, 0xbc8de9e6, 0x9937209e, 0xbaf7b153, + 0xfffffff6, 0x3fefffff, 0x54442d16, 0x3e6921fb, + 0x906e88aa, 0x3c70b0cd, 0xfe19968a, 0xbb03b7c0, + 0xffffffd9, 0x3fefffff, 0x54442d0e, 0x3e7921fb, + 0xdf22ed26, 0xbc8e9e64, 0x8d1b6ffb, 0xbaee8bb4, + 0xffffff62, 0x3fefffff, 0x54442cef, 0x3e8921fb, + 0x0dd18f0f, 0x3c6619b2, 0x7f2b20fb, 0xbb00e133, + 0xfffffd88, 0x3fefffff, 0x54442c73, 0x3e9921fb, + 0x0dd314b2, 0x3c8619b2, 0x619fdf6e, 0xbb174e98, + 0xfffff621, 0x3fefffff, 0x54442a83, 0x3ea921fb, + 0x3764acf5, 0x3c8866c8, 0xf5b2407f, 0xbb388215, + 0xffffd886, 0x3fefffff, 0x544422c2, 0x3eb921fb, + 0x20e7a944, 0xbc8e64df, 0x7b9b9f23, 0x3b5a0961, + 0xffff6216, 0x3fefffff, 0x544403c1, 0x3ec921fb, + 0x52ee25ea, 0x3c69b20e, 0x4df6a86a, 0xbb5999d9, + 0xfffd8858, 0x3fefffff, 0x544387ba, 0x3ed921fb, + 0xd8910ead, 0x3c89b20f, 0x0809d04d, 0x3b77d9db, + 0xfff62162, 0x3fefffff, 0x544197a1, 0x3ee921fb, + 0x438d3925, 0xbc8937a8, 0xa5d27f7a, 0xbb858b02, + 0xffd88586, 0x3fefffff, 0x5439d73a, 0x3ef921fb, + 0x94b3ddd2, 0x3c8b22e4, 0xf8a3b73d, 0xbb863c7f, + 0xff62161a, 0x3fefffff, 0x541ad59e, 0x3f0921fb, + 0x7ea469b2, 0xbc835c13, 0xb8cee262, 0x3bae9860, + 0xfd885867, 0x3fefffff, 0x539ecf31, 0x3f1921fb, + 0x23a32e63, 0xbc77d556, 0xfcd23a30, 0x3b96b111, + 0xf621619c, 0x3fefffff, 0x51aeb57c, 0x3f2921fb, + 0xbbbd8fe6, 0xbc87507d, 0x4916c435, 0xbbca6e1d, + 0xd8858675, 0x3fefffff, 0x49ee4ea6, 0x3f3921fb, + 0x54748eab, 0xbc879f0e, 0x744a453e, 0x3bde894d, + 0x62161a34, 0x3fefffff, 0x2aecb360, 0x3f4921fb, + 0xb1f9b9c4, 0xbc6136dc, 0x7e566b4c, 0x3be87615, + 0x88586ee6, 0x3feffffd, 0xaee6472e, 0x3f5921fa, + 0xf173ae5b, 0x3c81af64, 0x284a9df8, 0xbbfee52e, + 0x21621d02, 0x3feffff6, 0xbecca4ba, 0x3f6921f8, + 0xebc82813, 0xbc76acfc, 0x7bcab5b2, 0x3c02ba40, + 0x858e8a92, 0x3fefffd8, 0xfe670071, 0x3f7921f0, + 0x1883bcf7, 0x3c8359c7, 0xfe6b7a9b, 0x3bfab967, + 0x169b92db, 0x3fefff62, 0xfcdec784, 0x3f8921d1, + 0xc81fbd0d, 0x3c85dda3, 0xbe836d9d, 0x3c29878e, + 0x6084cd0d, 0x3feffd88, 0xf7a3667e, 0x3f992155, + 0x4556e4cb, 0xbc81354d, 0x091a0130, 0xbbfb1d63, + 0xe3796d7e, 0x3feff621, 0xf10dd814, 0x3fa91f65, + 0x2e24aa15, 0xbc6c57bc, 0x0d569a90, 0xbc2912bd, + 0xa3d12526, 
0x3fefd88d, 0xbc29b42c, 0x3fb917a6, + 0x378811c7, 0xbc887df6, 0xd26ed688, 0xbc3e2718, + 0xcff75cb0, 0x3fef6297, 0x3c69a60b, 0x3fc8f8b8, + 0x2a361fd3, 0x3c756217, 0xb9ff8d82, 0xbc626d19, + 0xcf328d46, 0x3fed906b, 0xa6aea963, 0x3fd87de2, + 0x10231ac2, 0x3c7457e6, 0xd3d5a610, 0xbc672ced, + 0x667f3bcd, 0x3fe6a09e, 0x667f3bcd, 0x3fe6a09e, + 0x13b26456, 0xbc8bdd34, 0x13b26456, 0xbc8bdd34, + 0x00000000, 0x00000000, 0x00000000, 0x3ff00000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; int @@ -259,7 +259,7 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { w[i][0] = ct[2*i][0]; - w[i][1] = ct[2*i][2]; + w[i][1] = ct[2*i][1]; } /* generate sine and cosine tables with maximum error less than 0.5 ULP */ @@ -336,7 +336,7 @@ ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { w[i][0] = ct[2*i][0]; - w[i][1] = ct[2*i][2]; + w[i][1] = ct[2*i][1]; } /* generate sine and cosine tables with maximum error less than 0.5 ULP */ -- cgit v1.1 From f4e533c64e0c005e567b3fa76bc3456facbe5484 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 16 Sep 2015 17:52:55 +0300 Subject: Add double-double arithmetic to generate "exact" double precision cosine and sine tables. Correct rounding verified using MPFR upto 2^28. SSE2 optimized ffts_generate_cosine_sine_pow2_64f takes twice as long as ffts_generate_cosine_sine_pow2_32f. --- src/ffts_dd.h | 230 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ src/ffts_trig.c | 146 +++++++++++++++++++++++++++++++++++ src/ffts_trig.h | 3 + 3 files changed, 379 insertions(+) create mode 100644 src/ffts_dd.h diff --git a/src/ffts_dd.h b/src/ffts_dd.h new file mode 100644 index 0000000..e9402c6 --- /dev/null +++ b/src/ffts_dd.h @@ -0,0 +1,230 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2015, Jukka Ojanen + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef FFTS_DD_H +#define FFTS_DD_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + +#include "ffts_attributes.h" + +#if HAVE_SSE2 +#include +#endif + +/* double-double number */ +struct ffts_dd_t +{ + double hi; + double lo; +}; + +#if HAVE_SSE2 +/* double-double vector */ +struct ffts_dd2_t { + __m128d hi; + __m128d lo; +}; +#endif + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_add_dd_unnormalized(const struct ffts_dd_t a, + const struct ffts_dd_t b); + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_mul_dd_unnormalized(const struct ffts_dd_t a, + const struct ffts_dd_t b); + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_split(double a); + +/* aka quick-two-sum */ +static FFTS_INLINE struct ffts_dd_t +ffts_dd_add(double a, double b) +{ + struct ffts_dd_t dd; + dd.hi = a + b; + dd.lo = b - (dd.hi - a); + return dd; +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_add_dd(const struct ffts_dd_t a, + const struct ffts_dd_t b) +{ + struct ffts_dd_t t1 = ffts_dd_add_dd_unnormalized(a, b); + return ffts_dd_add(t1.hi, t1.lo); +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_add_dd_unnormalized(const struct ffts_dd_t a, + const struct ffts_dd_t b) +{ + struct ffts_dd_t dd; + double e1; + dd.hi = a.hi + b.hi; + e1 = dd.hi - a.hi; + dd.lo = ((a.hi - (dd.hi - e1)) + (b.hi - e1)) + (a.lo + b.lo); + return dd; +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_mul(const double a, const double b) +{ + struct ffts_dd_t dd; + struct ffts_dd_t t1 = ffts_dd_split(a); + struct ffts_dd_t t2 = ffts_dd_split(b); + dd.hi = a * b; + dd.lo = (t1.hi * t2.hi - dd.hi); + dd.lo += (t1.hi * t2.lo + t1.lo * t2.hi); + dd.lo += t1.lo * t2.lo; + return dd; +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_mul_dd(const struct ffts_dd_t a, + const struct ffts_dd_t b) +{ + struct ffts_dd_t dd = ffts_dd_mul_dd_unnormalized(a, b); + return ffts_dd_add(dd.hi, dd.lo); +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_mul_dd_unnormalized(const struct ffts_dd_t a, + const struct ffts_dd_t b) +{ + struct ffts_dd_t dd = ffts_dd_mul(a.hi, b.hi); + dd.lo += (a.hi * b.lo + a.lo * b.hi); + return dd; +} + +static FFTS_INLINE struct ffts_dd_t +ffts_dd_split(double a) +{ + /* 2^27+1 = 134217729 */ + struct ffts_dd_t dd; + double t = 134217729.0 * a; + dd.hi = t - (t - a); + dd.lo = a - dd.hi; + return dd; +} + +#if HAVE_SSE2 +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t a, + const struct ffts_dd2_t b); + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t a, + const struct ffts_dd2_t b); + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_split(__m128d a); + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_add(__m128d a, __m128d b) +{ + struct ffts_dd2_t dd2; + dd2.hi = _mm_add_pd(a, b); + dd2.lo = _mm_sub_pd(b, _mm_sub_pd(dd2.hi, a)); + return dd2; +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_add_dd2(const struct ffts_dd2_t a, + const struct ffts_dd2_t b) +{ + struct ffts_dd2_t t1 = ffts_dd2_add_dd2_unnormalized(a, b); + return ffts_dd2_add(t1.hi, t1.lo); +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t a, + const struct ffts_dd2_t b) +{ + struct ffts_dd2_t dd2; + __m128d e1; + dd2.hi = _mm_add_pd(a.hi, b.hi); + e1 = _mm_sub_pd(dd2.hi, a.hi); + dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd(a.hi, _mm_sub_pd(dd2.hi, e1)), + _mm_sub_pd(b.hi, e1)), _mm_add_pd(a.lo, b.lo)); + return dd2; +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_mul(const __m128d a, const 
__m128d b) +{ + struct ffts_dd2_t dd2; + struct ffts_dd2_t t1 = ffts_dd2_split(a); + struct ffts_dd2_t t2 = ffts_dd2_split(b); + dd2.hi = _mm_mul_pd(a, b); + dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd( + _mm_mul_pd(t1.hi, t2.hi), dd2.hi), + _mm_add_pd(_mm_mul_pd(t1.hi, t2.lo), + _mm_mul_pd(t1.lo, t2.hi))), + _mm_mul_pd(t1.lo, t2.lo)); + return dd2; +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_mul_dd2(const struct ffts_dd2_t a, + const struct ffts_dd2_t b) +{ + struct ffts_dd2_t dd2 = ffts_dd2_mul_dd2_unnormalized(a, b); + return ffts_dd2_add(dd2.hi, dd2.lo); +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t a, + const struct ffts_dd2_t b) +{ + struct ffts_dd2_t dd2 = ffts_dd2_mul(a.hi, b.hi); + dd2.lo = _mm_add_pd(dd2.lo, _mm_add_pd( + _mm_mul_pd(a.hi, b.lo), _mm_mul_pd(a.lo, b.hi))); + return dd2; +} + +static FFTS_INLINE struct ffts_dd2_t +ffts_dd2_split(__m128d a) +{ + /* 2^27+1 = 134217729 */ + struct ffts_dd2_t dd2; + __m128d t = _mm_mul_pd(a, _mm_set1_pd(134217729.0)); + dd2.hi = _mm_sub_pd(t, _mm_sub_pd(t, a)); + dd2.lo = _mm_sub_pd(a, dd2.hi); + return dd2; +} +#endif /* HAVE_SSE2 */ + +#endif /* FFTS_DD_H */ diff --git a/src/ffts_trig.c b/src/ffts_trig.c index 624d2c3..1ca9c98 100644 --- a/src/ffts_trig.c +++ b/src/ffts_trig.c @@ -31,6 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "ffts_trig.h" +#include "ffts_dd.h" /* 1/(2*cos(pow(2,-p)*pi)) */ static const FFTS_ALIGN(16) unsigned int half_secant[132] = { @@ -286,6 +287,151 @@ exit: return 0; } +#if HAVE_SSE2 +int +ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) +{ + static const __m128d sign_swap = { 0.0, -0.0 }; + const struct ffts_dd2_t *FFTS_RESTRICT ct; + const double *FFTS_RESTRICT hs; + struct ffts_dd2_t FFTS_ALIGN(16) w[32]; + struct ffts_dd2_t FFTS_ALIGN(16) h[32]; + int i, log_2, offset; + + /* size must be a power of two */ + if (!table || !table_size || (table_size & (table_size - 1))) { + return -1; + } + + /* the first */ + table[0][0] = 1.0; + table[0][1] = -0.0; + + if (FFTS_UNLIKELY(table_size == 1)) { + goto exit; + } + + if (FFTS_UNLIKELY(table_size == 2)) { + /* skip over */ + i = 1; + goto mid_point; + } + + /* calculate table offset */ + FFTS_ASSUME(table_size/2 > 1); + log_2 = ffts_ctzl(table_size); + FFTS_ASSUME(log_2 > 1); + offset = 32 - log_2; + ct = (const struct ffts_dd2_t*) + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); + hs = (const double*) &half_secant[4 * offset]; + + /* initialize from lookup table */ + for (i = 0; i <= log_2; i++) { + w[i] = ct[i]; + + /* duplicate the high and low parts */ + h[i].hi = _mm_set1_pd(hs[2*i + 0]); + h[i].lo = _mm_set1_pd(hs[2*i + 1]); + } + + /* generate sine and cosine tables with maximum error less than 0.5 ULP */ + for (i = 1; i < table_size/2; i++) { + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + /* result of ffts_dd_mul_dd is normalized */ + _mm_store_pd((double*) &table[i + 0], + _mm_or_pd(w[log_2].hi, sign_swap)); + _mm_store_pd((double*) &table[table_size - i], + _mm_or_pd(_mm_shuffle_pd(w[log_2].hi, w[log_2].hi, 1), sign_swap)); + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2] = ffts_dd2_mul_dd2(h[log_2], + ffts_dd2_add_dd2_unnormalized(w[log_2 + 1], w[offset])); + } + +mid_point: + table[i][0] = 0.707106781186547524; + table[i][1] = -0.707106781186547524; + +exit: + return 0; +} +#else +int +ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f 
*const table, int table_size) +{ + const struct ffts_dd_t *FFTS_RESTRICT ct; + const struct ffts_dd_t *FFTS_RESTRICT hs; + struct ffts_dd_t FFTS_ALIGN(16) w[32][2]; + int i, log_2, offset; + + /* size must be a power of two */ + if (!table || !table_size || (table_size & (table_size - 1))) { + return -1; + } + + /* the first */ + table[0][0] = 1.0; + table[0][1] = -0.0; + + if (FFTS_UNLIKELY(table_size == 1)) { + goto exit; + } + + if (FFTS_UNLIKELY(table_size == 2)) { + /* skip over */ + i = 1; + goto mid_point; + } + + /* calculate table offset */ + FFTS_ASSUME(table_size/2 > 1); + log_2 = ffts_ctzl(table_size); + FFTS_ASSUME(log_2 > 1); + offset = 32 - log_2; + ct = (const struct ffts_dd_t*) + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); + hs = (const struct ffts_dd_t*) &half_secant[4 * offset]; + + /* initialize from lookup table */ + for (i = 0; i <= log_2; i++) { + w[i][0].hi = ct[2*i + 0].hi; + w[i][0].lo = ct[2*i + 1].hi; + w[i][1].hi = ct[2*i + 0].lo; + w[i][1].lo = ct[2*i + 1].lo; + } + + /* generate sine and cosine tables with maximum error less than 0.5 ULP */ + for (i = 1; i < table_size/2; i++) { + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + /* result of ffts_dd_mul_dd is normalized */ + table[i + 0][0] = w[log_2][0].hi; + table[i + 0][1] = -w[log_2][1].hi; + table[table_size - i][0] = w[log_2][1].hi; + table[table_size - i][1] = -w[log_2][0].hi; + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2][0] = ffts_dd_mul_dd(hs[log_2], + ffts_dd_add_dd_unnormalized(w[log_2 + 1][0], w[offset][0])); + w[log_2][1] = ffts_dd_mul_dd(hs[log_2], + ffts_dd_add_dd_unnormalized(w[log_2 + 1][1], w[offset][1])); + } + +mid_point: + table[i][0] = 0.707106781186547524; + table[i][1] = -0.707106781186547524; + +exit: + return 0; +} +#endif + int ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, int sign, diff --git a/src/ffts_trig.h b/src/ffts_trig.h index cfed2fb..0b22738 100644 --- a/src/ffts_trig.h +++ b/src/ffts_trig.h @@ -46,6 +46,9 @@ int ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size); int +ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size); + +int ffts_generate_table_1d_real_32f(struct _ffts_plan_t *const p, int sign, int invert); -- cgit v1.1 From e013b85d38101abc0c449dff509aaf8a0057d321 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 17 Sep 2015 16:49:58 +0300 Subject: Add SSE2 optimized ffts_generate_cosine_sine_pow2_32f --- src/ffts_trig.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 84 insertions(+), 14 deletions(-) diff --git a/src/ffts_trig.c b/src/ffts_trig.c index 1ca9c98..cdd2d05 100644 --- a/src/ffts_trig.c +++ b/src/ffts_trig.c @@ -221,12 +221,15 @@ exit: * O. Buneman, Stable on–line creation of sines and cosines of * successive angles, Proc. IEEE 75, 1434 – 1435 (1987). 
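 *
 * In the code below the recurrence takes the form
 *
 *   w[k] = h[k] * (w[k+1] + w[next])
 *
 * where k is the number of trailing zeros in the output index,
 * "next" is an index derived from the next zero bit above it, and
 * h[k] = 1/(2*cos(2^-k*pi)) is read from the half_secant table, so
 * each cached value is refreshed from two neighbours rather than
 * accumulating rounding error.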
*/ +#if HAVE_SSE2 int ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) { - const ffts_cpx_64f *FFTS_RESTRICT ct; + static const __m128d sign_swap = { 0.0, -0.0 }; + const __m128d *FFTS_RESTRICT ct; const double *FFTS_RESTRICT hs; - ffts_cpx_64f FFTS_ALIGN(16) w[32]; + __m128d FFTS_ALIGN(16) w[32]; + __m128d FFTS_ALIGN(16) h[32]; int i, log_2, offset; /* size must be a power of two */ @@ -253,14 +256,16 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) log_2 = ffts_ctzl(table_size); FFTS_ASSUME(log_2 > 1); offset = 32 - log_2; - ct = (const ffts_cpx_64f*) + ct = (const __m128d*) FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); hs = (const double*) &half_secant[4 * offset]; /* initialize from lookup table */ for (i = 0; i <= log_2; i++) { - w[i][0] = ct[2*i][0]; - w[i][1] = ct[2*i][1]; + w[i] = ct[2*i]; + + /* duplicate the high part */ + h[i] = _mm_set1_pd(hs[2*i]); } /* generate sine and cosine tables with maximum error less than 0.5 ULP */ @@ -268,15 +273,15 @@ ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) /* calculate trailing zeros in index */ log_2 = ffts_ctzl(i); - table[i + 0][0] = (float) w[log_2][0]; - table[i + 0][1] = (float) -w[log_2][1]; - table[table_size - i][0] = (float) w[log_2][1]; - table[table_size - i][1] = (float) -w[log_2][0]; + /* note that storing is not 16 byte aligned */ + _mm_storel_pi((__m64*) &table[i + 0], + _mm_cvtpd_ps(_mm_or_pd(w[log_2], sign_swap))); + _mm_storel_pi((__m64*) &table[table_size - i], _mm_cvtpd_ps( + _mm_or_pd(_mm_shuffle_pd(w[log_2], w[log_2], 1), sign_swap))); /* skip and find next trailing zero */ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); - w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]); - w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]); + w[log_2] = _mm_mul_pd(h[log_2], _mm_add_pd(w[log_2 + 1], w[offset])); } mid_point: @@ -287,7 +292,6 @@ exit: return 0; } -#if HAVE_SSE2 int ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) { @@ -304,8 +308,8 @@ ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) } /* the first */ - table[0][0] = 1.0; - table[0][1] = -0.0; + table[0][0] = 1.0; + table[0][1] = -0.0; if (FFTS_UNLIKELY(table_size == 1)) { goto exit; @@ -361,6 +365,72 @@ exit: } #else int +ffts_generate_cosine_sine_pow2_32f(ffts_cpx_32f *const table, int table_size) +{ + const ffts_cpx_64f *FFTS_RESTRICT ct; + const double *FFTS_RESTRICT hs; + ffts_cpx_64f FFTS_ALIGN(16) w[32]; + int i, log_2, offset; + + /* size must be a power of two */ + if (!table || !table_size || (table_size & (table_size - 1))) { + return -1; + } + + /* the first */ + table[0][0] = 1.0f; + table[0][1] = -0.0f; + + if (FFTS_UNLIKELY(table_size == 1)) { + goto exit; + } + + if (FFTS_UNLIKELY(table_size == 2)) { + /* skip over */ + i = 1; + goto mid_point; + } + + /* calculate table offset */ + FFTS_ASSUME(table_size/2 > 1); + log_2 = ffts_ctzl(table_size); + FFTS_ASSUME(log_2 > 1); + offset = 32 - log_2; + ct = (const ffts_cpx_64f*) + FFTS_ASSUME_ALIGNED_32(&cos_sin_pi_table[8 * offset]); + hs = (const double*) &half_secant[4 * offset]; + + /* initialize from lookup table */ + for (i = 0; i <= log_2; i++) { + w[i][0] = ct[2*i][0]; + w[i][1] = ct[2*i][1]; + } + + /* generate sine and cosine tables with maximum error less than 0.5 ULP */ + for (i = 1; i < table_size/2; i++) { + /* calculate trailing zeros in index */ + log_2 = ffts_ctzl(i); + + table[i + 0][0] = (float) 
w[log_2][0]; + table[i + 0][1] = (float) -w[log_2][1]; + table[table_size - i][0] = (float) w[log_2][1]; + table[table_size - i][1] = (float) -w[log_2][0]; + + /* skip and find next trailing zero */ + offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); + w[log_2][0] = hs[2 * log_2] * (w[log_2 + 1][0] + w[offset][0]); + w[log_2][1] = hs[2 * log_2] * (w[log_2 + 1][1] + w[offset][1]); + } + +mid_point: + table[i][0] = 0.70710677f; + table[i][1] = -0.70710677f; + +exit: + return 0; +} + +int ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) { const struct ffts_dd_t *FFTS_RESTRICT ct; -- cgit v1.1 From 2132b65a334a7a875da791bb971b79c103c55623 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 13 Oct 2015 00:44:58 +0300 Subject: Fix MSVC error C2719 --- src/ffts_dd.h | 36 ++++++++++++++++++------------------ src/ffts_trig.c | 5 +++-- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/ffts_dd.h b/src/ffts_dd.h index e9402c6..f8bbee4 100644 --- a/src/ffts_dd.h +++ b/src/ffts_dd.h @@ -142,12 +142,12 @@ ffts_dd_split(double a) #if HAVE_SSE2 static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t a, - const struct ffts_dd2_t b); +ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b); static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t a, - const struct ffts_dd2_t b); +ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b); static FFTS_INLINE struct ffts_dd2_t ffts_dd2_split(__m128d a); @@ -162,23 +162,23 @@ ffts_dd2_add(__m128d a, __m128d b) } static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_add_dd2(const struct ffts_dd2_t a, - const struct ffts_dd2_t b) +ffts_dd2_add_dd2(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b) { struct ffts_dd2_t t1 = ffts_dd2_add_dd2_unnormalized(a, b); return ffts_dd2_add(t1.hi, t1.lo); } static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t a, - const struct ffts_dd2_t b) +ffts_dd2_add_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b) { struct ffts_dd2_t dd2; __m128d e1; - dd2.hi = _mm_add_pd(a.hi, b.hi); - e1 = _mm_sub_pd(dd2.hi, a.hi); - dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd(a.hi, _mm_sub_pd(dd2.hi, e1)), - _mm_sub_pd(b.hi, e1)), _mm_add_pd(a.lo, b.lo)); + dd2.hi = _mm_add_pd(a->hi, b->hi); + e1 = _mm_sub_pd(dd2.hi, a->hi); + dd2.lo = _mm_add_pd(_mm_add_pd(_mm_sub_pd(a->hi, _mm_sub_pd(dd2.hi, e1)), + _mm_sub_pd(b->hi, e1)), _mm_add_pd(a->lo, b->lo)); return dd2; } @@ -198,20 +198,20 @@ ffts_dd2_mul(const __m128d a, const __m128d b) } static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_mul_dd2(const struct ffts_dd2_t a, - const struct ffts_dd2_t b) +ffts_dd2_mul_dd2(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b) { struct ffts_dd2_t dd2 = ffts_dd2_mul_dd2_unnormalized(a, b); return ffts_dd2_add(dd2.hi, dd2.lo); } static FFTS_INLINE struct ffts_dd2_t -ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t a, - const struct ffts_dd2_t b) +ffts_dd2_mul_dd2_unnormalized(const struct ffts_dd2_t *const FFTS_RESTRICT a, + const struct ffts_dd2_t *const FFTS_RESTRICT b) { - struct ffts_dd2_t dd2 = ffts_dd2_mul(a.hi, b.hi); + struct ffts_dd2_t dd2 = ffts_dd2_mul(a->hi, b->hi); dd2.lo = _mm_add_pd(dd2.lo, _mm_add_pd( 
- _mm_mul_pd(a.hi, b.lo), _mm_mul_pd(a.lo, b.hi))); + _mm_mul_pd(a->hi, b->lo), _mm_mul_pd(a->lo, b->hi))); return dd2; } diff --git a/src/ffts_trig.c b/src/ffts_trig.c index cdd2d05..74ebfd2 100644 --- a/src/ffts_trig.c +++ b/src/ffts_trig.c @@ -300,6 +300,7 @@ ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) const double *FFTS_RESTRICT hs; struct ffts_dd2_t FFTS_ALIGN(16) w[32]; struct ffts_dd2_t FFTS_ALIGN(16) h[32]; + struct ffts_dd2_t FFTS_ALIGN(16) sum; int i, log_2, offset; /* size must be a power of two */ @@ -352,8 +353,8 @@ ffts_generate_cosine_sine_pow2_64f(ffts_cpx_64f *const table, int table_size) /* skip and find next trailing zero */ offset = (log_2 + 2 + ffts_ctzl(~i >> (log_2 + 2))); - w[log_2] = ffts_dd2_mul_dd2(h[log_2], - ffts_dd2_add_dd2_unnormalized(w[log_2 + 1], w[offset])); + sum = ffts_dd2_add_dd2_unnormalized(&w[log_2 + 1], &w[offset]); + w[log_2] = ffts_dd2_mul_dd2(&h[log_2], &sum); } mid_point: -- cgit v1.1 From cf746df26a0cd6e1948988e458fc7305bb9bbdbd Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 16:24:15 +0300 Subject: Fix error "target specific option mismatch", _mm_addsub_ps intrinsic needs SSE3 --- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 593f418..b528067 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -270,7 +270,9 @@ elseif(CMAKE_COMPILER_IS_GNUCC) list(APPEND FFTS_EXTRA_LIBRARIES m) endif(HAVE_LIBM) - if(HAVE_EMMINTRIN_H) + if(HAVE_PMMINTRIN_H) + add_definitions(-msse3) + elseif(HAVE_EMMINTRIN_H) add_definitions(-msse2) elseif(HAVE_XMMINTRIN_H) add_definitions(-msse) -- cgit v1.1 From 63be44d1125d48127c2ca97109788034a3ae4c68 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 16:25:00 +0300 Subject: First attempt to enable Travis --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..2eb0b5f --- /dev/null +++ b/.travis.yml @@ -0,0 +1,3 @@ +language: c +script: + - mkdir build && cd build && cmake .. && cmake --build . -- cgit v1.1 From e3dc730a258b023df6015ec392afbb3bd250dd14 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 16:29:11 +0300 Subject: Fix CMake warning for mis-matching arguments --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b528067..5d50cf6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,7 +276,7 @@ elseif(CMAKE_COMPILER_IS_GNUCC) add_definitions(-msse2) elseif(HAVE_XMMINTRIN_H) add_definitions(-msse) - endif(HAVE_EMMINTRIN_H) + endif(HAVE_PMMINTRIN_H) endif(MSVC) include_directories(include) -- cgit v1.1 From bd2ee488602eeca657d3d65a712944e06edc5858 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 16:42:20 +0300 Subject: Enable Travis to OSX building --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 2eb0b5f..f791aed 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,6 @@ language: c +os: + - linux + - osx script: - mkdir build && cd build && cmake .. && cmake --build . 
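
The "Fix MSVC error C2719" change above deserves a word of explanation:
32-bit MSVC cannot pass 16-byte-aligned aggregates by value (it reports
"formal parameter with __declspec(align('16')) won't be aligned"), so a
helper taking a struct with __m128d members has to take it by const
pointer instead. A minimal sketch of the failure and the workaround;
the names here are illustrative, not taken from the FFTS sources:

#include <emmintrin.h>

struct dd2 {
    __m128d hi;
    __m128d lo;
};

/* error C2719 on x86 MSVC, because struct dd2 is 16-byte aligned:
 *
 *   static struct dd2 dd2_add(struct dd2 a, struct dd2 b);
 */

/* workaround: pass by const pointer; returning by value stays legal,
 * since the result travels back through a hidden pointer */
static struct dd2 dd2_add(const struct dd2 *a, const struct dd2 *b)
{
    struct dd2 r;
    r.hi = _mm_add_pd(a->hi, b->hi); /* component-wise add only, */
    r.lo = _mm_add_pd(a->lo, b->lo); /* not a full double-double sum */
    return r;
}
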
-- cgit v1.1 From ffe25628f8e056ae9995a7110895cf70e984160d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 17:07:36 +0300 Subject: Rename README to README.md to support markdown --- README | 34 ---------------------------------- README.md | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 34 deletions(-) delete mode 100644 README create mode 100644 README.md diff --git a/README b/README deleted file mode 100644 index f7a67a0..0000000 --- a/README +++ /dev/null @@ -1,34 +0,0 @@ -FFTS -- The Fastest Fourier Transform in the South -by Anthony Blake - -To build for Android, edit and run build_android.sh - -To build for iOS, edit and run build_iphone.sh - -To build for Linux or OS X on x86, run - ./configure --enable-sse --enable-single --prefix=/usr/local - make - make install - -Optionally build for Windows and Linux with CMake, run - mkdir build - cd build - cmake .. - -FFTS dynamically generates code at runtime. This can be disabled with ---disable-dynamic-code - -Note that 32 bit x86 dynamic machine code generation is not supported at the moment. - -For JNI targets: --enable-jni will build the jni stuff automatically for -the host target, and --enable-shared must also be added manually for it to -work. - -If you like FFTS, please show your support by sending a postcard to: - -Anthony Blake -Department of Computer Science -The University of Waikato -Private Bag 3105 -Hamilton 3240 -NEW ZEALAND diff --git a/README.md b/README.md new file mode 100644 index 0000000..f7a67a0 --- /dev/null +++ b/README.md @@ -0,0 +1,34 @@ +FFTS -- The Fastest Fourier Transform in the South +by Anthony Blake + +To build for Android, edit and run build_android.sh + +To build for iOS, edit and run build_iphone.sh + +To build for Linux or OS X on x86, run + ./configure --enable-sse --enable-single --prefix=/usr/local + make + make install + +Optionally build for Windows and Linux with CMake, run + mkdir build + cd build + cmake .. + +FFTS dynamically generates code at runtime. This can be disabled with +--disable-dynamic-code + +Note that 32 bit x86 dynamic machine code generation is not supported at the moment. + +For JNI targets: --enable-jni will build the jni stuff automatically for +the host target, and --enable-shared must also be added manually for it to +work. + +If you like FFTS, please show your support by sending a postcard to: + +Anthony Blake +Department of Computer Science +The University of Waikato +Private Bag 3105 +Hamilton 3240 +NEW ZEALAND -- cgit v1.1 From dfcaa570f9c63b4dbc8ae46924a52f60389fda0d Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 17:10:24 +0300 Subject: Add Travis build status --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f7a67a0..498f599 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ -FFTS -- The Fastest Fourier Transform in the South -by Anthony Blake +# FFTS -- The Fastest Fourier Transform in the South + +[![Build Status](https://travis-ci.org/linkotec/ffts.svg?branch=master)](https://travis-ci.org/linkotec/ffts) To build for Android, edit and run build_android.sh -- cgit v1.1 From e5d3853205a4666c40a3fe7f789e1dcabc59d375 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 14 Oct 2015 17:21:46 +0300 Subject: Fix styling --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 498f599..50fb60e 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,9 @@ work. 
If you like FFTS, please show your support by sending a postcard to: -Anthony Blake -Department of Computer Science -The University of Waikato -Private Bag 3105 -Hamilton 3240 +Anthony Blake
+Department of Computer Science
+The University of Waikato
+Private Bag 3105
+Hamilton 3240
NEW ZEALAND -- cgit v1.1 From 031d73b0a86058c4a7cb0ecebd7fb2a9015cf13b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 21 Oct 2015 14:10:27 +0300 Subject: Detection of pmmintrin.h with GCC may fail if required instruction set is not enabled --- CMakeLists.txt | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5d50cf6..00122db 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -209,27 +209,49 @@ if(NOT CMAKE_CROSSCOMPILING) endif(NOT HARDFP_SUPPORTED) endif(NEON_SUPPORTED OR VFP_SUPPORTED) else() + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + + # enable SSE code generation + if(CMAKE_COMPILER_IS_GNUCC) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse") + endif(CMAKE_COMPILER_IS_GNUCC) + # check if the platform has support for SSE intrinsics check_include_file(xmmintrin.h HAVE_XMMINTRIN_H) if(HAVE_XMMINTRIN_H) add_definitions(-DHAVE_SSE) + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) endif(HAVE_XMMINTRIN_H) + # enable SSE2 code generation + if(CMAKE_COMPILER_IS_GNUCC) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse2") + endif(CMAKE_COMPILER_IS_GNUCC) + # check if the platform has support for SSE2 intrinsics check_include_file(emmintrin.h HAVE_EMMINTRIN_H) if(HAVE_EMMINTRIN_H) add_definitions(-DHAVE_SSE2) + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) endif(HAVE_EMMINTRIN_H) + # enable SSE3 code generation + if(CMAKE_COMPILER_IS_GNUCC) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse3") + endif(CMAKE_COMPILER_IS_GNUCC) + # check if the platform has support for SSE3 intrinsics check_include_file(pmmintrin.h HAVE_PMMINTRIN_H) if(HAVE_PMMINTRIN_H) add_definitions(-DHAVE_PMMINTRIN_H) add_definitions(-DHAVE_SSE3) + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) else() # check if the platform has specific intrinsics check_include_file(intrin.h HAVE_INTRIN_H) if(HAVE_INTRIN_H) + add_definitions(-DHAVE_INTRIN_H) + check_c_source_compiles(" #include int main(int argc, char** argv) @@ -239,9 +261,9 @@ if(NOT CMAKE_CROSSCOMPILING) return _mm_movemask_ps(_mm_moveldup_ps(_mm_set_ss(1.0f))); }" HAVE__MM_MOVELDUP_PS ) + if(HAVE__MM_MOVELDUP_PS) # assume that we have all SSE3 intrinsics - add_definitions(-DHAVE_INTRIN_H) add_definitions(-DHAVE_SSE3) endif(HAVE__MM_MOVELDUP_PS) endif(HAVE_INTRIN_H) @@ -263,7 +285,7 @@ elseif(CMAKE_COMPILER_IS_GNUCC) # enable all warnings set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra") - # some systems need libm for some of the math functions to work + # some systems need libm for the math functions to work check_library_exists(m pow "" HAVE_LIBM) if(HAVE_LIBM) list(APPEND CMAKE_REQUIRED_LIBRARIES m) -- cgit v1.1 From 42546982a254031a5ead53e53ba2a5872b582651 Mon Sep 17 00:00:00 2001 From: Mikko Orispaa Date: Fri, 20 Nov 2015 07:32:34 +0000 Subject: Changed NEON test in CMakeList.txt so that it works for newer ARMs (-mfloat-abi=hard). Fixed a bug in test.c (did not compile for NEON-ARM). 
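
The detection boils down to compiling and running a tiny NEON program
with the chosen -mfpu/-mfloat-abi flags. A sketch of the kind of probe
involved (not the literal TEST_SOURCE_CODE from CMakeLists.txt):

#include <arm_neon.h>

int main(void)
{
    /* touches a NEON register, so this only builds and runs when
     * -mfpu=neon and the selected -mfloat-abi actually work */
    float32x4_t v = vdupq_n_f32(1.0f);
    v = vaddq_f32(v, v);
    return vgetq_lane_f32(v, 0) == 2.0f ? 0 : 1;
}

check_c_source_runs() reports success only if the probe both links and
exits with status 0, which is what separates a working float ABI from a
broken one on a given toolchain.
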
--- CMakeLists.txt | 4 ++-- tests/test.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 00122db..f6f5c4c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -130,7 +130,7 @@ if(NOT CMAKE_CROSSCOMPILING) ) # Test running with -mfpu=neon - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=softfp") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=hard") check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SUPPORTED) if(NOT NEON_SUPPORTED) @@ -389,4 +389,4 @@ add_executable(ffts_test target_link_libraries(ffts_test ffts_static ${FFTS_EXTRA_LIBRARIES} -) \ No newline at end of file +) diff --git a/tests/test.c b/tests/test.c index 9559095..d07f766 100644 --- a/tests/test.c +++ b/tests/test.c @@ -165,7 +165,7 @@ int main(int argc, char *argv[]) printf("%d %d %f %f\n", i, sign, output[2*i], output[2*i+1]); ffts_free(p); -#ifdef HAVE_NEON +#ifdef HAVE_SSE _mm_free(input); _mm_free(output); #else @@ -189,4 +189,4 @@ int main(int argc, char *argv[]) } return 0; -} \ No newline at end of file +} -- cgit v1.1 From c3e325911ae15524c814db75fe701df91d6f7c2a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 24 Nov 2015 11:55:21 +0200 Subject: Fix ARM 'softfp' detection, broken by ARM 'hard' float detection --- CMakeLists.txt | 50 +++++++++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f6f5c4c..6536ef0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -129,16 +129,34 @@ if(NOT CMAKE_CROSSCOMPILING) }" ) - # Test running with -mfpu=neon + # Test running with -mfpu=neon and -mfloat-abi=hard set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=hard") - check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SUPPORTED) - - if(NOT NEON_SUPPORTED) - # Fallback using VFP if NEON is not supported - if(ENABLE_NEON) - message(FATAL_ERROR "FFTS cannot enable NEON on this platform") - endif(ENABLE_NEON) + check_c_source_runs("${TEST_SOURCE_CODE}" NEON_HARDFP_SUPPORTED) + + if(NOT NEON_HARDFP_SUPPORTED) + # Test running with -mfpu=neon and -mfloat-abi=softfp + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=softfp") + check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SOFTFP_SUPPORTED) + + if(NOT NEON_SOFTFP_SUPPORTED) + if(ENABLE_NEON) + message(FATAL_ERROR "FFTS cannot enable NEON on this platform") + endif(ENABLE_NEON) + else() + message("FFTS is using 'neon' FPU and 'softfp' float ABI") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=softfp") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=softfp") + set(ENABLE_NEON ON) + endif(NOT NEON_SOFTFP_SUPPORTED) + else() + message("FFTS is using 'neon' FPU and 'hard' float ABI") + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=hard") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=hard") + set(ENABLE_NEON ON) + endif(NOT NEON_HARDFP_SUPPORTED) + # Fallback using VFP if NEON is not supported + if(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED) # Test for ARM VFP support set(TEST_SOURCE_CODE " double sum(double a, double b) @@ -169,19 +187,9 @@ if(NOT CMAKE_CROSSCOMPILING) message("FFTS is using 'vfp' FPU") set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=vfp") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp") + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) set(ENABLE_VFP ON) endif(NOT VFP_SUPPORTED) - else() - message("FFTS is using 'neon' FPU") - 
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon") - set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon") - set(ENABLE_NEON ON) - endif(NOT NEON_SUPPORTED) - - # Determinate float ABI if NEON or VFP is used - if(NEON_SUPPORTED OR VFP_SUPPORTED) - set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) # Test running with -mfloat-abi=hard set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=hard") @@ -193,7 +201,7 @@ if(NOT CMAKE_CROSSCOMPILING) # Test running with -mfloat-abi=softfp set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=softfp") check_c_source_runs("${TEST_SOURCE_CODE}" SOFTFP_SUPPORTED) - + if(NOT SOFTFP_SUPPORTED) # Most likely development libraries are missing message(WARNING "FFTS is using 'soft' float ABI") @@ -207,7 +215,7 @@ if(NOT CMAKE_CROSSCOMPILING) set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=hard") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard") endif(NOT HARDFP_SUPPORTED) - endif(NEON_SUPPORTED OR VFP_SUPPORTED) + endif(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED) else() set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) -- cgit v1.1 From ae1b59ddd07cb66b0807bc2c7c981ce96c69acea Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 30 Nov 2015 17:16:01 +0200 Subject: Enable building shared library and start version numbering from 0.9.0. On Windows when using FFTS as a DLL, define FFTS_SHARED. This is not mandatory, but it offers a little performance increase. Hide symbols when possible to improve compiler optimization and sizeof binary. Use CMake target alias "ffts" to choose between static and shared library, preferring static --- CMakeLists.txt | 97 +++++++++++++++++++++++++++++++++++++++++++++++------- include/ffts.h | 43 +++++++++++++++++++----- src/ffts.c | 8 +++-- src/ffts_nd.c | 6 ++-- src/ffts_real.c | 2 +- src/ffts_real_nd.c | 6 ++-- 6 files changed, 134 insertions(+), 28 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6536ef0..6c72b5b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,13 @@ cmake_minimum_required(VERSION 2.8) project(ffts C ASM) +# TODO: to support AutoConfigure building, this should came from "template" file +set(FFTS_MAJOR 0) +set(FFTS_MINOR 9) +set(FFTS_MICRO 0) + +set(FFTS_VERSION "ffts-${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO}") + set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) set_property(GLOBAL PROPERTY USE_FOLDERS ON) @@ -27,15 +34,25 @@ option(ENABLE_RUNTIME_DYNAMIC_CODE "Enables the runtime generation of dynamic machine code." ON ) +option(GENERATE_POSITION_INDEPENDENT_CODE + "Generate position independent code" OFF +) + option(ENABLE_SHARED "Enable building a shared library." OFF ) +option(ENABLE_STATIC + "Enable building a static library." ON +) + include(CheckCSourceCompiles) include(CheckCSourceRuns) include(CheckIncludeFile) -add_definitions(-DFFTS_CMAKE_GENERATED) +# Ensure defined when building FFTS (as opposed to using it from +# another project). Used to export functions from Windows DLL. +add_definitions(-DFFTS_BUILD) # check existence of various headers check_include_file(malloc.h HAVE_MALLOC_H) @@ -286,13 +303,24 @@ if(MSVC) # enable all warnings but also disable some.. 
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4 /wd4127") + # mark debug versions + set(CMAKE_DEBUG_POSTFIX "d") + add_definitions(-D_USE_MATH_DEFINES) elseif(CMAKE_COMPILER_IS_GNUCC) + include(CheckCCompilerFlag) include(CheckLibraryExists) # enable all warnings set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra") + # check if we can control visibility of symbols + check_c_compiler_flag(-fvisibility=hidden HAVE_GCC_VISIBILITY) + if(HAVE_GCC_VISIBILITY) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden") + add_definitions(-DHAVE_GCC_VISIBILITY) + endif(HAVE_GCC_VISIBILITY) + # some systems need libm for the math functions to work check_library_exists(m pow "" HAVE_LIBM) if(HAVE_LIBM) @@ -385,16 +413,61 @@ else() ) endif(DISABLE_DYNAMIC_CODE) -add_library(ffts_static - ${FFTS_HEADERS} - ${FFTS_SOURCES} -) +if(GENERATE_POSITION_INDEPENDENT_CODE) + if(CMAKE_VERSION VERSION_LESS "2.8.9") + check_c_compiler_flag(-fPIC HAVE_GCC_PIC) + if(HAVE_GCC_PIC) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC -DPIC") + check_c_compiler_flag(-fPIE HAVE_GCC_PIE) + if(HAVE_GCC_PIE) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fPIE -DPIE") + endif(HAVE_GCC_PIE) + endif(HAVE_GCC_PIC) + else() + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif(CMAKE_VERSION VERSION_LESS "2.8.9") +endif(GENERATE_POSITION_INDEPENDENT_CODE) + +if(ENABLE_SHARED) + add_library(ffts_shared SHARED + ${FFTS_HEADERS} + ${FFTS_SOURCES} + ) -add_executable(ffts_test - tests/test.c -) + # On unix-like platforms the library is called "libffts.so" and on Windows "ffts.dll" + set_target_properties(ffts_shared PROPERTIES + DEFINE_SYMBOL FFTS_SHARED + OUTPUT_NAME ffts + VERSION ${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO} + ) +endif(ENABLE_SHARED) -target_link_libraries(ffts_test - ffts_static - ${FFTS_EXTRA_LIBRARIES} -) +if(ENABLE_STATIC) + add_library(ffts_static STATIC + ${FFTS_HEADERS} + ${FFTS_SOURCES} + ) + + if(UNIX) + # On unix-like platforms the library is called "libffts.a" + set_target_properties(ffts_static PROPERTIES OUTPUT_NAME ffts) + endif(UNIX) +endif(ENABLE_STATIC) + +if(ENABLE_STATIC OR ENABLE_SHARED) + add_executable(ffts_test + tests/test.c + ) + + # link with static library by default + if(ENABLE_STATIC) + add_library(ffts ALIAS ffts_static) + else() + add_library(ffts ALIAS ffts_shared) + endif(ENABLE_STATIC) + + target_link_libraries(ffts_test + ffts + ${FFTS_EXTRA_LIBRARIES} + ) +endif(ENABLE_STATIC OR ENABLE_SHARED) diff --git a/include/ffts.h b/include/ffts.h index 8e25cb4..d187e36 100644 --- a/include/ffts.h +++ b/include/ffts.h @@ -42,27 +42,54 @@ extern "C" { #endif +#if (defined(_WIN32) || defined(WIN32)) && defined(FFTS_SHARED) +# ifdef FFTS_BUILD +# define FFTS_API __declspec(dllexport) +# else +# define FFTS_API __declspec(dllimport) +# endif +#else +# if (__GNUC__ >= 4) || defined(HAVE_GCC_VISIBILITY) +# define FFTS_API __attribute__ ((visibility("default"))) +# else +# define FFTS_API +# endif +#endif + #define POSITIVE_SIGN 1 #define NEGATIVE_SIGN -1 struct _ffts_plan_t; typedef struct _ffts_plan_t ffts_plan_t; -ffts_plan_t *ffts_init_1d(size_t N, int sign); -ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign); -ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); +FFTS_API ffts_plan_t* +ffts_init_1d(size_t N, int sign); + +FFTS_API ffts_plan_t* +ffts_init_2d(size_t N1, size_t N2, int sign); + +FFTS_API ffts_plan_t* +ffts_init_nd(int rank, size_t *Ns, int sign); /* For real transforms, sign == -1 implies a real-to-complex forwards tranform, and sign == 1 implies a complex-to-real backwards 
transform. The output of a real-to-complex transform is N/2+1 complex numbers, where the redundant outputs have been omitted. */ -ffts_plan_t *ffts_init_1d_real(size_t N, int sign); -ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign); -ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); +FFTS_API ffts_plan_t* +ffts_init_1d_real(size_t N, int sign); + +FFTS_API ffts_plan_t* +ffts_init_2d_real(size_t N1, size_t N2, int sign); + +FFTS_API ffts_plan_t* +ffts_init_nd_real(int rank, size_t *Ns, int sign); + +FFTS_API void +ffts_execute(ffts_plan_t *p, const void *input, void *output); -void ffts_execute(ffts_plan_t *p, const void *input, void *output); -void ffts_free(ffts_plan_t *p); +FFTS_API void +ffts_free(ffts_plan_t *p); #ifdef __cplusplus } diff --git a/src/ffts.c b/src/ffts.c index a04a92c..a22a1c8 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -149,7 +149,8 @@ static FFTS_INLINE void ffts_vmem_free(void *addr, size_t length) #endif } -void ffts_execute(ffts_plan_t *p, const void *in, void *out) +FFTS_API void +ffts_execute(ffts_plan_t *p, const void *in, void *out) { /* TODO: Define NEEDS_ALIGNED properly instead */ #if defined(HAVE_SSE) || defined(HAVE_NEON) @@ -165,7 +166,8 @@ void ffts_execute(ffts_plan_t *p, const void *in, void *out) p->transform(p, (const float*) in, (float*) out); } -void ffts_free(ffts_plan_t *p) +FFTS_API void +ffts_free(ffts_plan_t *p) { if (p) { p->destroy(p); @@ -409,7 +411,7 @@ cleanup: return -1; } -ffts_plan_t* +FFTS_API ffts_plan_t* ffts_init_1d(size_t N, int sign) { const size_t leaf_N = 8; diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 23338c1..72e21e7 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -281,7 +281,8 @@ static void ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) } } -ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign) +FFTS_API ffts_plan_t* +ffts_init_nd(int rank, size_t *Ns, int sign) { ffts_plan_t *p; size_t vol; @@ -354,7 +355,8 @@ cleanup: return NULL; } -ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign) +FFTS_API ffts_plan_t* +ffts_init_2d(size_t N1, size_t N2, int sign) { size_t Ns[2]; diff --git a/src/ffts_real.c b/src/ffts_real.c index 6650d07..7f41069 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -599,7 +599,7 @@ ffts_execute_1d_real_inv(ffts_plan_t *p, const void *input, void *output) p->plans[0]->transform(p->plans[0], buf, output); } -ffts_plan_t* +FFTS_API ffts_plan_t* ffts_init_1d_real(size_t N, int sign) { ffts_plan_t *p; diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 5eae44b..545e8f0 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -218,7 +218,8 @@ static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) } } -ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign) +FFTS_API ffts_plan_t* +ffts_init_nd_real(int rank, size_t *Ns, int sign) { int i; size_t vol = 1; @@ -327,7 +328,8 @@ cleanup: return NULL; } -ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign) +FFTS_API ffts_plan_t* +ffts_init_2d_real(size_t N1, size_t N2, int sign) { size_t Ns[2]; -- cgit v1.1 From a6e616bb0d5a3777f5bd9880314541880a817d8b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 30 Nov 2015 18:14:04 +0200 Subject: Require CMake >= 2.8.12 to support ALIAS targets --- CMakeLists.txt | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6c72b5b..cad094f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 2.8) 
+cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) project(ffts C ASM) @@ -414,18 +414,7 @@ else() endif(DISABLE_DYNAMIC_CODE) if(GENERATE_POSITION_INDEPENDENT_CODE) - if(CMAKE_VERSION VERSION_LESS "2.8.9") - check_c_compiler_flag(-fPIC HAVE_GCC_PIC) - if(HAVE_GCC_PIC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC -DPIC") - check_c_compiler_flag(-fPIE HAVE_GCC_PIE) - if(HAVE_GCC_PIE) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fPIE -DPIE") - endif(HAVE_GCC_PIE) - endif(HAVE_GCC_PIC) - else() - set(CMAKE_POSITION_INDEPENDENT_CODE ON) - endif(CMAKE_VERSION VERSION_LESS "2.8.9") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif(GENERATE_POSITION_INDEPENDENT_CODE) if(ENABLE_SHARED) -- cgit v1.1 From 61633e5952d0c59724145b37f60fb446e847a5db Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 30 Nov 2015 18:29:32 +0200 Subject: Try to fix Travis testing --- .travis.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.travis.yml b/.travis.yml index f791aed..e449155 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,5 +2,11 @@ language: c os: - linux - osx +addons: + apt: + packages: + - cmake + sources: + - kalakris-cmake script: - mkdir build && cd build && cmake .. && cmake --build . -- cgit v1.1 From 9320b4ab8d0da95e18964324e904e603a3fc5d2e Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 30 Nov 2015 18:47:41 +0200 Subject: Try to fix Travis testing #2 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e449155..1e70e8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,6 @@ addons: packages: - cmake sources: - - kalakris-cmake + - kubuntu-backports script: - mkdir build && cd build && cmake .. && cmake --build . -- cgit v1.1 From 2051c214d591be08e40fdba623ccefabbba11b29 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 11 Mar 2016 12:24:56 +0200 Subject: Remove unused CMake build option --- CMakeLists.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cad094f..8761229 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,10 +30,6 @@ option(DISABLE_DYNAMIC_CODE "Disables the use of dynamic machine code generation." OFF ) -option(ENABLE_RUNTIME_DYNAMIC_CODE - "Enables the runtime generation of dynamic machine code." ON -) - option(GENERATE_POSITION_INDEPENDENT_CODE "Generate position independent code" OFF ) -- cgit v1.1 From e667ca5e4304b31cd7093eaead481b032092b985 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 11 Mar 2016 14:32:22 +0200 Subject: Restore ARM NEON optimized recursive version --- src/ffts.c | 14 +++++----- src/ffts_static.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 85 insertions(+), 13 deletions(-) diff --git a/src/ffts.c b/src/ffts.c index a22a1c8..5d72a52 100644 --- a/src/ffts.c +++ b/src/ffts.c @@ -55,7 +55,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#endif #endif -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(HAVE_NEON) static const FFTS_ALIGN(64) float w_data[16] = { 0.70710678118654757273731092936941f, 0.70710678118654746171500846685376f, @@ -227,7 +227,7 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) if (n_luts) { size_t lut_size; -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(__arm__) && !defined(HAVE_NEON) lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f) / 2; #else lut_size = leaf_N * (((1 << n_luts) - 2) * 3 + 1) * sizeof(ffts_cpx_32f); @@ -272,7 +272,7 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w0[j][1] = tmp[j * stride][1]; } -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(__arm__) #ifdef HAVE_NEON for (j = 0; j < n/4; j += 4) { V4SF2 temp0 = V4SF2_LD(fw0 + j*2); @@ -323,7 +323,7 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) w2[j][1] = tmp[(j + (n/8)) * stride][1]; } -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(__arm__) #ifdef HAVE_NEON for (j = 0; j < n/8; j += 4) { V4SF2 temp0, temp1, temp2; @@ -389,11 +389,11 @@ ffts_generate_luts(ffts_plan_t *p, size_t N, size_t leaf_N, int sign) stride >>= 1; } -#if defined(__arm__) && !defined(DYNAMIC_DISABLED) +#if defined(HAVE_NEON) if (sign < 0) { - p->oe_ws = (void*)(&w_data[4]); + p->oe_ws = (void*)(w_data + 4); p->ee_ws = (void*)(w_data); - p->eo_ws = (void*)(&w_data[4]); + p->eo_ws = (void*)(w_data + 4); } else { p->oe_ws = (void*)(w_data + 12); p->ee_ws = (void*)(w_data + 8); diff --git a/src/ffts_static.c b/src/ffts_static.c index 701cca8..7747de0 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -36,6 +36,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "ffts_internal.h" #include "macros.h" +#if defined(HAVE_NEON) +#include "neon.h" +#endif + #include static const FFTS_ALIGN(16) float ffts_constants_small_32f[24] = { @@ -945,6 +949,28 @@ ffts_static_firstpass_even_32f(float *FFTS_RESTRICT out, static void ffts_static_rec_f_32f(ffts_plan_t *p, float *data, size_t N) { +#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) + if (N > 16) { + size_t N1 = N >> 1; + size_t N2 = N >> 2; + size_t N3 = N >> 3; + float *ws = ((float *)(p->ws)) + (p->ws_is[ffts_ctzl(N)-4] << 1); + + ffts_static_rec_f_32f(p, data, N2); + ffts_static_rec_f_32f(p, data + N1, N3); + ffts_static_rec_f_32f(p, data + N1 + N2, N3); + ffts_static_rec_f_32f(p, data + N, N2); + ffts_static_rec_f_32f(p, data + N + N1, N2); + + if (N == p->N) { + neon_static_x8_t_f(data, N, ws); + } else { + neon_static_x8_f(data, N, ws); + } + } else if (N == 16) { + neon_static_x4_f(data, N, p->ws); + } +#else const float *ws = (float*) p->ws; if (N > 128) { @@ -983,11 +1009,34 @@ ffts_static_rec_f_32f(ffts_plan_t *p, float *data, size_t N) assert(N == 16); V4SF_X_4(0, data, N, ws); } +#endif } static void ffts_static_rec_i_32f(ffts_plan_t *p, float *data, size_t N) { +#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) + if (N > 16) { + size_t N1 = N >> 1; + size_t N2 = N >> 2; + size_t N3 = N >> 3; + float *ws = ((float *)(p->ws)) + (p->ws_is[ffts_ctzl(N)-4] << 1); + + ffts_static_rec_i_32f(p, data, N2); + ffts_static_rec_i_32f(p, data + N1, N3); + ffts_static_rec_i_32f(p, data + N1 + N2, N3); + ffts_static_rec_i_32f(p, data + N, N2); + ffts_static_rec_i_32f(p, data + N + N1, N2); + + if (N == p->N) { + neon_static_x8_t_i(data, N, ws); + } else { + neon_static_x8_i(data, N, ws); + } + } else if(N==16) { + neon_static_x4_i(data, N, p->ws); + } +#else float *ws = (float*) p->ws; if (N > 128) { @@ -1026,28 +1075,51 @@ ffts_static_rec_i_32f(ffts_plan_t *p, float *data, size_t N) assert(N == 16); V4SF_X_4(1, data, N, ws); } +#endif } void ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) { + const float *din = (const float*) in; + float *dout = (float*) out; + +#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) if (ffts_ctzl(p->N) & 1) { - ffts_static_firstpass_odd_32f((float*) out, (const float*) in, p, 0); + neon_static_o_f(p, din, dout); } else { - ffts_static_firstpass_even_32f((float*) out, (const float*) in, p, 0); + neon_static_e_f(p, din, dout); } +#else + if (ffts_ctzl(p->N) & 1) { + ffts_static_firstpass_odd_32f(dout, din, p, 0); + } else { + ffts_static_firstpass_even_32f(dout, din, p, 0); + } +#endif - ffts_static_rec_f_32f(p, (float*) out, p->N); + ffts_static_rec_f_32f(p, dout, p->N); } void ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) { + const float *din = (const float*) in; + float *dout = (float*) out; + +#if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) + if (ffts_ctzl(p->N) & 1) { + neon_static_o_i(p, din, dout); + } else { + neon_static_e_i(p, din, dout); + } +#else if (ffts_ctzl(p->N) & 1) { - ffts_static_firstpass_odd_32f((float*) out, (const float*) in, p, 1); + ffts_static_firstpass_odd_32f(dout, din, p, 1); } else { - ffts_static_firstpass_even_32f((float*) out, (const float*) in, p, 1); + ffts_static_firstpass_even_32f(dout, din, p, 1); } +#endif - ffts_static_rec_i_32f(p, (float*) out, p->N); + ffts_static_rec_i_32f(p, dout, p->N); } \ No newline at end of file -- cgit v1.1 From 6296905ad0b45f02a67359370a42168e2d3f1656 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Fri, 11 Mar 2016 14:57:31 +0200 Subject: 
Resolve undefined reference to `neon_transpose_to_buf' --- CMakeLists.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8761229..5104962 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -361,15 +361,15 @@ set(FFTS_SOURCES ) if(ENABLE_NEON) + list(APPEND FFTS_SOURCES + src/neon.s + ) + if(DISABLE_DYNAMIC_CODE) list(APPEND FFTS_SOURCES src/neon_static_f.s src/neon_static_i.s ) - else() - list(APPEND FFTS_SOURCES - src/neon.s - ) endif(DISABLE_DYNAMIC_CODE) add_definitions(-DHAVE_NEON) -- cgit v1.1 From 61166019c3aa54a26e6e9baeb5af769402e0b616 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 14 Mar 2016 11:35:32 +0200 Subject: Peel off top-level only if-case from ARM NEON recursive implementation --- src/ffts_static.c | 134 ++++++++++++++++++++++++++++++++---------------------- src/neon.h | 85 +++++++++++++++++----------------- 2 files changed, 120 insertions(+), 99 deletions(-) diff --git a/src/ffts_static.c b/src/ffts_static.c index 7747de0..483b5e2 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -947,36 +947,31 @@ ffts_static_firstpass_even_32f(float *FFTS_RESTRICT out, } static void -ffts_static_rec_f_32f(ffts_plan_t *p, float *data, size_t N) +ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) { + const float *ws = (const float*) p->ws; + #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) if (N > 16) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; - float *ws = ((float *)(p->ws)) + (p->ws_is[ffts_ctzl(N)-4] << 1); - - ffts_static_rec_f_32f(p, data, N2); - ffts_static_rec_f_32f(p, data + N1, N3); - ffts_static_rec_f_32f(p, data + N1 + N2, N3); - ffts_static_rec_f_32f(p, data + N, N2); - ffts_static_rec_f_32f(p, data + N + N1, N2); - - if (N == p->N) { - neon_static_x8_t_f(data, N, ws); - } else { - neon_static_x8_f(data, N, ws); - } + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; + + ffts_static_rec_f_32f(p, data , N2); + ffts_static_rec_f_32f(p, data + N1 , N3); + ffts_static_rec_f_32f(p, data + N1 + N2, N3); + ffts_static_rec_f_32f(p, data + N , N2); + ffts_static_rec_f_32f(p, data + N + N1 , N2); + + neon_static_x8_f(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); } else if (N == 16) { - neon_static_x4_f(data, N, p->ws); + neon_static_x4_f(data, N, ws); } #else - const float *ws = (float*) p->ws; - if (N > 128) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; ffts_static_rec_f_32f(p, data , N2); ffts_static_rec_f_32f(p, data + N1 , N3); @@ -1013,36 +1008,31 @@ ffts_static_rec_f_32f(ffts_plan_t *p, float *data, size_t N) } static void -ffts_static_rec_i_32f(ffts_plan_t *p, float *data, size_t N) +ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) { + const float *ws = (const float*) p->ws; + #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) if (N > 16) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; - float *ws = ((float *)(p->ws)) + (p->ws_is[ffts_ctzl(N)-4] << 1); - - ffts_static_rec_i_32f(p, data, N2); - ffts_static_rec_i_32f(p, data + N1, N3); - ffts_static_rec_i_32f(p, data + N1 + N2, N3); - ffts_static_rec_i_32f(p, data + N, N2); - ffts_static_rec_i_32f(p, data + N + N1, N2); - - if (N == p->N) { - neon_static_x8_t_i(data, N, ws); - } else { - neon_static_x8_i(data, N, ws); - } - } else if(N==16) { - neon_static_x4_i(data, N, p->ws); + const size_t N1 = N >> 1; + const size_t N2 = N >> 
2; + const size_t N3 = N >> 3; + + ffts_static_rec_i_32f(p, data , N2); + ffts_static_rec_i_32f(p, data + N1 , N3); + ffts_static_rec_i_32f(p, data + N1 + N2, N3); + ffts_static_rec_i_32f(p, data + N , N2); + ffts_static_rec_i_32f(p, data + N + N1 , N2); + + neon_static_x8_i(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); + } else if (N == 16) { + neon_static_x4_i(data, N, ws); } #else - float *ws = (float*) p->ws; - if (N > 128) { - size_t N1 = N >> 1; - size_t N2 = N >> 2; - size_t N3 = N >> 3; + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; ffts_static_rec_i_32f(p, data , N2); ffts_static_rec_i_32f(p, data + N1 , N3); @@ -1084,21 +1074,38 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) const float *din = (const float*) in; float *dout = (float*) out; + const size_t N = p->N; + const int N_log_2 = ffts_ctzl(N); + #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - if (ffts_ctzl(p->N) & 1) { + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; + + const float *ws = ((const float*) p->ws) + (p->ws_is[N_log_2 - 4] << 1); + + if (N_log_2 & 1) { neon_static_o_f(p, din, dout); } else { neon_static_e_f(p, din, dout); } + + ffts_static_rec_f_32f(p, dout , N2); + ffts_static_rec_f_32f(p, dout + N1 , N3); + ffts_static_rec_f_32f(p, dout + N1 + N2, N3); + ffts_static_rec_f_32f(p, dout + N , N2); + ffts_static_rec_f_32f(p, dout + N + N1 , N2); + + neon_static_x8_t_f(dout, N, ws); #else - if (ffts_ctzl(p->N) & 1) { + if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 0); } else { ffts_static_firstpass_even_32f(dout, din, p, 0); } -#endif - ffts_static_rec_f_32f(p, dout, p->N); + ffts_static_rec_f_32f(p, dout, N); +#endif } void @@ -1107,19 +1114,36 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) const float *din = (const float*) in; float *dout = (float*) out; + const size_t N = p->N; + const int N_log_2 = ffts_ctzl(N); + #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - if (ffts_ctzl(p->N) & 1) { + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; + + const float *ws = ((const float*) p->ws) + (p->ws_is[N_log_2 - 4] << 1); + + if (N_log_2 & 1) { neon_static_o_i(p, din, dout); } else { neon_static_e_i(p, din, dout); } + + ffts_static_rec_i_32f(p, dout , N2); + ffts_static_rec_i_32f(p, dout + N1 , N3); + ffts_static_rec_i_32f(p, dout + N1 + N2, N3); + ffts_static_rec_i_32f(p, dout + N , N2); + ffts_static_rec_i_32f(p, dout + N + N1 , N2); + + neon_static_x8_t_i(dout, N, ws); #else - if (ffts_ctzl(p->N) & 1) { + if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 1); } else { ffts_static_firstpass_even_32f(dout, din, p, 1); } -#endif - ffts_static_rec_i_32f(p, dout, p->N); + ffts_static_rec_i_32f(p, dout, N); +#endif } \ No newline at end of file diff --git a/src/neon.h b/src/neon.h index 2f51995..b40623b 100644 --- a/src/neon.h +++ b/src/neon.h @@ -1,38 +1,38 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef __NEON_H__ -#define __NEON_H__ +#ifndef FFTS_NEON_H +#define FFTS_NEON_H #include "ffts.h" @@ -48,19 +48,16 @@ void neon_end(); void neon_transpose(uint64_t *in, uint64_t *out, int w, int h); void neon_transpose_to_buf(uint64_t *in, uint64_t *out, int w); -//typedef struct _ffts_plan_t ffts_plan_t; - -void neon_static_e_f(ffts_plan_t * , const void * , void * ); -void neon_static_o_f(ffts_plan_t * , const void * , void * ); -void neon_static_x4_f(float *, size_t, float *); -void neon_static_x8_f(float *, size_t, float *); -void neon_static_x8_t_f(float *, size_t, float *); +void neon_static_e_f(ffts_plan_t*, const void*, void*); +void neon_static_o_f(ffts_plan_t*, const void*, void*); +void neon_static_x4_f(float*, size_t, const float*); +void neon_static_x8_f(float*, size_t, const float*); +void neon_static_x8_t_f(float*, size_t, const float*); -void neon_static_e_i(ffts_plan_t * , const void * , void * ); -void neon_static_o_i(ffts_plan_t * , const void * , void * ); -void neon_static_x4_i(float *, size_t, float *); -void neon_static_x8_i(float *, size_t, float *); -void neon_static_x8_t_i(float *, size_t, float *); +void neon_static_e_i(ffts_plan_t*, const void*, void*); +void neon_static_o_i(ffts_plan_t*, const void*, void*); +void neon_static_x4_i(float*, size_t, const float*); +void neon_static_x8_i(float*, size_t, const float*); +void neon_static_x8_t_i(float*, size_t, const float*); -#endif -// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3: +#endif /* FFTS_NEON_H */ -- cgit v1.1 From 10d4d45b19639c2e5ee9b9289b262285954969c6 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 14 Mar 2016 14:54:43 +0200 Subject: Unroll to minimize recursive function call depth (overhead) --- src/ffts_static.c | 136 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 91 insertions(+), 45 deletions(-) diff --git a/src/ffts_static.c b/src/ffts_static.c index 483b5e2..bf52732 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -952,7 +952,7 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) const float *ws = (const float*) p->ws; #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - if (N > 16) { + if (N > 128) { const size_t N1 = N >> 1; const size_t N2 = N >> 2; const size_t N3 = N >> 3; @@ -964,8 +964,27 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) ffts_static_rec_f_32f(p, data + N + N1 , N2); neon_static_x8_f(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); - } else if (N == 16) { - neon_static_x4_f(data, N, ws); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + neon_static_x8_f(data , 32, ws1); + neon_static_x4_f(data + 64, 16, ws); + neon_static_x4_f(data + 96, 16, ws); + neon_static_x8_f(data + 128, 32, ws1); + neon_static_x8_f(data + 192, 32, ws1); + + neon_static_x8_f(data, 128, ws + (p->ws_is[3] << 1)); + } else if (N == 64) { + neon_static_x4_f(data , 16, ws); + neon_static_x4_f(data + 64, 16, ws); + neon_static_x4_f(data + 96, 16, ws); + + neon_static_x8_f(data, 64, ws + (p->ws_is[2] << 1)); + } else if (N == 32) { + neon_static_x8_f(data, 32, ws + (p->ws_is[1] << 1)); + } else { + assert(N == 16); + neon_static_x4_f(data, 16, ws); } #else if (N > 128) { @@ -983,26 +1002,24 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) } else if (N == 128) { const float *ws1 = ws + (p->ws_is[1] << 1); - V4SF_X_8(0, data + 0, 32, ws1); + V4SF_X_8(0, data + 0, 32, ws1); + V4SF_X_4(0, data + 64, 16, ws); + V4SF_X_4(0, data + 96, 16, ws); + V4SF_X_8(0, data + 128, 32, 
ws1); + V4SF_X_8(0, data + 192, 32, ws1); - V4SF_X_4(0, data + 64, 16, ws); - V4SF_X_4(0, data + 96, 16, ws); - - V4SF_X_8(0, data + 128, 32, ws1); - V4SF_X_8(0, data + 192, 32, ws1); - - V4SF_X_8(0, data, N, ws + (p->ws_is[3] << 1)); + V4SF_X_8(0, data, 128, ws + (p->ws_is[3] << 1)); } else if (N == 64) { V4SF_X_4(0, data + 0, 16, ws); V4SF_X_4(0, data + 64, 16, ws); V4SF_X_4(0, data + 96, 16, ws); - V4SF_X_8(0, data + 0, N, ws + (p->ws_is[2] << 1)); + V4SF_X_8(0, data, 64, ws + (p->ws_is[2] << 1)); } else if (N == 32) { - V4SF_X_8(0, data, N, ws + (p->ws_is[1] << 1)); + V4SF_X_8(0, data, 32, ws + (p->ws_is[1] << 1)); } else { assert(N == 16); - V4SF_X_4(0, data, N, ws); + V4SF_X_4(0, data, 16, ws); } #endif } @@ -1013,7 +1030,7 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) const float *ws = (const float*) p->ws; #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - if (N > 16) { + if (N > 128) { const size_t N1 = N >> 1; const size_t N2 = N >> 2; const size_t N3 = N >> 3; @@ -1025,8 +1042,27 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) ffts_static_rec_i_32f(p, data + N + N1 , N2); neon_static_x8_i(data, N, ws + (p->ws_is[ffts_ctzl(N) - 4] << 1)); - } else if (N == 16) { - neon_static_x4_i(data, N, ws); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + neon_static_x8_i(data , 32, ws1); + neon_static_x4_i(data + 64, 16, ws); + neon_static_x4_i(data + 96, 16, ws); + neon_static_x8_i(data + 128, 32, ws1); + neon_static_x8_i(data + 192, 32, ws1); + + neon_static_x8_i(data, 128, ws + (p->ws_is[3] << 1)); + } else if (N == 64) { + neon_static_x4_i(data , 16, ws); + neon_static_x4_i(data + 64, 16, ws); + neon_static_x4_i(data + 96, 16, ws); + + neon_static_x8_i(data, 64, ws + (p->ws_is[2] << 1)); + } else if (N == 32) { + neon_static_x8_i(data, 32, ws + (p->ws_is[1] << 1)); + } else { + assert(N == 16); + neon_static_x4_i(data, 16, ws); } #else if (N > 128) { @@ -1045,25 +1081,23 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) const float *ws1 = ws + (p->ws_is[1] << 1); V4SF_X_8(1, data + 0, 32, ws1); - V4SF_X_4(1, data + 64, 16, ws); V4SF_X_4(1, data + 96, 16, ws); - V4SF_X_8(1, data + 128, 32, ws1); V4SF_X_8(1, data + 192, 32, ws1); - V4SF_X_8(1, data, N, ws + (p->ws_is[3] << 1)); + V4SF_X_8(1, data, 128, ws + (p->ws_is[3] << 1)); } else if (N == 64) { V4SF_X_4(1, data + 0, 16, ws); V4SF_X_4(1, data + 64, 16, ws); V4SF_X_4(1, data + 96, 16, ws); - V4SF_X_8(1, data + 0, N, ws + (p->ws_is[2] << 1)); + V4SF_X_8(1, data, 64, ws + (p->ws_is[2] << 1)); } else if (N == 32) { - V4SF_X_8(1, data, N, ws + (p->ws_is[1] << 1)); + V4SF_X_8(1, data, 32, ws + (p->ws_is[1] << 1)); } else { assert(N == 16); - V4SF_X_4(1, data, N, ws); + V4SF_X_4(1, data, 16, ws); } #endif } @@ -1078,11 +1112,7 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) const int N_log_2 = ffts_ctzl(N); #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - const size_t N1 = N >> 1; - const size_t N2 = N >> 2; - const size_t N3 = N >> 3; - - const float *ws = ((const float*) p->ws) + (p->ws_is[N_log_2 - 4] << 1); + const float *ws = (const float*) p->ws; if (N_log_2 & 1) { neon_static_o_f(p, din, dout); @@ -1090,13 +1120,23 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) neon_static_e_f(p, din, dout); } - ffts_static_rec_f_32f(p, dout , N2); - ffts_static_rec_f_32f(p, dout + N1 , N3); - ffts_static_rec_f_32f(p, dout + N1 + N2, N3); - ffts_static_rec_f_32f(p, dout + N , N2); - 
ffts_static_rec_f_32f(p, dout + N + N1 , N2); + if (N > 64) { + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; - neon_static_x8_t_f(dout, N, ws); + ffts_static_rec_f_32f(p, dout , N2); + ffts_static_rec_f_32f(p, dout + N1 , N3); + ffts_static_rec_f_32f(p, dout + N1 + N2, N3); + ffts_static_rec_f_32f(p, dout + N , N2); + ffts_static_rec_f_32f(p, dout + N + N1 , N2); + } else if (N == 64) { + neon_static_x4_f(dout , 16, ws); + neon_static_x4_f(dout + 64, 16, ws); + neon_static_x4_f(dout + 96, 16, ws); + } + + neon_static_x8_t_f(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); #else if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 0); @@ -1118,11 +1158,7 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) const int N_log_2 = ffts_ctzl(N); #if defined(HAVE_NEON) && defined(DYNAMIC_DISABLED) - const size_t N1 = N >> 1; - const size_t N2 = N >> 2; - const size_t N3 = N >> 3; - - const float *ws = ((const float*) p->ws) + (p->ws_is[N_log_2 - 4] << 1); + const float *ws = (const float*) p->ws; if (N_log_2 & 1) { neon_static_o_i(p, din, dout); @@ -1130,13 +1166,23 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) neon_static_e_i(p, din, dout); } - ffts_static_rec_i_32f(p, dout , N2); - ffts_static_rec_i_32f(p, dout + N1 , N3); - ffts_static_rec_i_32f(p, dout + N1 + N2, N3); - ffts_static_rec_i_32f(p, dout + N , N2); - ffts_static_rec_i_32f(p, dout + N + N1 , N2); + if (N > 64) { + const size_t N1 = N >> 1; + const size_t N2 = N >> 2; + const size_t N3 = N >> 3; + + ffts_static_rec_i_32f(p, dout , N2); + ffts_static_rec_i_32f(p, dout + N1 , N3); + ffts_static_rec_i_32f(p, dout + N1 + N2, N3); + ffts_static_rec_i_32f(p, dout + N , N2); + ffts_static_rec_i_32f(p, dout + N + N1 , N2); + } else if (N == 64) { + neon_static_x4_i(dout , 16, ws); + neon_static_x4_i(dout + 64, 16, ws); + neon_static_x4_i(dout + 96, 16, ws); + } - neon_static_x8_t_i(dout, N, ws); + neon_static_x8_t_i(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); #else if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 1); -- cgit v1.1 From 85a7167fdb139dc249330d34120080109878ea8f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 14 Mar 2016 14:56:46 +0200 Subject: Coverage analysis shows unused if-else branches --- src/ffts_static.c | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/src/ffts_static.c b/src/ffts_static.c index bf52732..00cc96f 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -980,11 +980,9 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) neon_static_x4_f(data + 96, 16, ws); neon_static_x8_f(data, 64, ws + (p->ws_is[2] << 1)); - } else if (N == 32) { - neon_static_x8_f(data, 32, ws + (p->ws_is[1] << 1)); } else { - assert(N == 16); - neon_static_x4_f(data, 16, ws); + assert(N == 32); + neon_static_x8_f(data, 32, ws + (p->ws_is[1] << 1)); } #else if (N > 128) { @@ -1015,11 +1013,9 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) V4SF_X_4(0, data + 96, 16, ws); V4SF_X_8(0, data, 64, ws + (p->ws_is[2] << 1)); - } else if (N == 32) { - V4SF_X_8(0, data, 32, ws + (p->ws_is[1] << 1)); } else { - assert(N == 16); - V4SF_X_4(0, data, 16, ws); + assert(N == 32); + V4SF_X_8(0, data, 32, ws + (p->ws_is[1] << 1)); } #endif } @@ -1058,11 +1054,9 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) neon_static_x4_i(data + 96, 16, ws); neon_static_x8_i(data, 64, ws + (p->ws_is[2] << 1)); - } else if (N == 32) 
{ - neon_static_x8_i(data, 32, ws + (p->ws_is[1] << 1)); } else { - assert(N == 16); - neon_static_x4_i(data, 16, ws); + assert(N == 32); + neon_static_x8_i(data, 32, ws + (p->ws_is[1] << 1)); } #else if (N > 128) { @@ -1093,11 +1087,9 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) V4SF_X_4(1, data + 96, 16, ws); V4SF_X_8(1, data, 64, ws + (p->ws_is[2] << 1)); - } else if (N == 32) { - V4SF_X_8(1, data, 32, ws + (p->ws_is[1] << 1)); } else { - assert(N == 16); - V4SF_X_4(1, data, 16, ws); + assert(N == 32); + V4SF_X_8(1, data, 32, ws + (p->ws_is[1] << 1)); } #endif } @@ -1120,7 +1112,7 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) neon_static_e_f(p, din, dout); } - if (N > 64) { + if (N > 128) { const size_t N1 = N >> 1; const size_t N2 = N >> 2; const size_t N3 = N >> 3; @@ -1130,6 +1122,14 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) ffts_static_rec_f_32f(p, dout + N1 + N2, N3); ffts_static_rec_f_32f(p, dout + N , N2); ffts_static_rec_f_32f(p, dout + N + N1 , N2); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + neon_static_x8_f(dout , 32, ws1); + neon_static_x4_f(dout + 64, 16, ws); + neon_static_x4_f(dout + 96, 16, ws); + neon_static_x8_f(dout + 128, 32, ws1); + neon_static_x8_f(dout + 192, 32, ws1); } else if (N == 64) { neon_static_x4_f(dout , 16, ws); neon_static_x4_f(dout + 64, 16, ws); @@ -1166,7 +1166,7 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) neon_static_e_i(p, din, dout); } - if (N > 64) { + if (N > 128) { const size_t N1 = N >> 1; const size_t N2 = N >> 2; const size_t N3 = N >> 3; @@ -1176,6 +1176,14 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) ffts_static_rec_i_32f(p, dout + N1 + N2, N3); ffts_static_rec_i_32f(p, dout + N , N2); ffts_static_rec_i_32f(p, dout + N + N1 , N2); + } else if (N == 128) { + const float *ws1 = ws + (p->ws_is[1] << 1); + + neon_static_x8_i(dout , 32, ws1); + neon_static_x4_i(dout + 64, 16, ws); + neon_static_x4_i(dout + 96, 16, ws); + neon_static_x8_i(dout + 128, 32, ws1); + neon_static_x8_i(dout + 192, 32, ws1); } else if (N == 64) { neon_static_x4_i(dout , 16, ws); neon_static_x4_i(dout + 64, 16, ws); -- cgit v1.1 From f64d89a9385e5981a3e175a205ee3fdf69773e61 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 15 Mar 2016 11:08:23 +0200 Subject: neon_static_x4_f and neon_static_x4_i don't use the second passed argument, and reschedule instructions for possible dual issue --- src/ffts_static.c | 86 ++++++++++++++++++++++++++++++--------------------- src/neon.h | 4 +-- src/neon_static_f.s | 88 ++++++++++++++++++++++++++--------------------------- src/neon_static_i.s | 88 ++++++++++++++++++++++++++--------------------------- 4 files changed, 140 insertions(+), 126 deletions(-) diff --git a/src/ffts_static.c b/src/ffts_static.c index 00cc96f..e1b2f6b 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -967,17 +967,19 @@ ffts_static_rec_f_32f(const ffts_plan_t *p, float *data, size_t N) } else if (N == 128) { const float *ws1 = ws + (p->ws_is[1] << 1); - neon_static_x8_f(data , 32, ws1); - neon_static_x4_f(data + 64, 16, ws); - neon_static_x4_f(data + 96, 16, ws); + neon_static_x8_f(data, 32, ws1); + + neon_static_x4_f(data + 64, ws); + neon_static_x4_f(data + 96, ws); + neon_static_x8_f(data + 128, 32, ws1); neon_static_x8_f(data + 192, 32, ws1); neon_static_x8_f(data, 128, ws + (p->ws_is[3] << 1)); } else if (N == 64) { - neon_static_x4_f(data , 16, ws); - neon_static_x4_f(data 
+ 64, 16, ws); - neon_static_x4_f(data + 96, 16, ws); + neon_static_x4_f(data , ws); + neon_static_x4_f(data + 64, ws); + neon_static_x4_f(data + 96, ws); neon_static_x8_f(data, 64, ws + (p->ws_is[2] << 1)); } else { @@ -1041,17 +1043,19 @@ ffts_static_rec_i_32f(const ffts_plan_t *p, float *data, size_t N) } else if (N == 128) { const float *ws1 = ws + (p->ws_is[1] << 1); - neon_static_x8_i(data , 32, ws1); - neon_static_x4_i(data + 64, 16, ws); - neon_static_x4_i(data + 96, 16, ws); + neon_static_x8_i(data, 32, ws1); + + neon_static_x4_i(data + 64, ws); + neon_static_x4_i(data + 96, ws); + neon_static_x8_i(data + 128, 32, ws1); neon_static_x8_i(data + 192, 32, ws1); neon_static_x8_i(data, 128, ws + (p->ws_is[3] << 1)); } else if (N == 64) { - neon_static_x4_i(data , 16, ws); - neon_static_x4_i(data + 64, 16, ws); - neon_static_x4_i(data + 96, 16, ws); + neon_static_x4_i(data , ws); + neon_static_x4_i(data + 64, ws); + neon_static_x4_i(data + 96, ws); neon_static_x8_i(data, 64, ws + (p->ws_is[2] << 1)); } else { @@ -1122,21 +1126,28 @@ ffts_static_transform_f_32f(ffts_plan_t *p, const void *in, void *out) ffts_static_rec_f_32f(p, dout + N1 + N2, N3); ffts_static_rec_f_32f(p, dout + N , N2); ffts_static_rec_f_32f(p, dout + N + N1 , N2); + + neon_static_x8_t_f(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); } else if (N == 128) { - const float *ws1 = ws + (p->ws_is[1] << 1); + neon_static_x8_f(dout, 32, ws + 8); + + neon_static_x4_f(dout + 64, ws); + neon_static_x4_f(dout + 96, ws); + + neon_static_x8_f(dout + 128, 32, ws + 8); + neon_static_x8_f(dout + 192, 32, ws + 8); - neon_static_x8_f(dout , 32, ws1); - neon_static_x4_f(dout + 64, 16, ws); - neon_static_x4_f(dout + 96, 16, ws); - neon_static_x8_f(dout + 128, 32, ws1); - neon_static_x8_f(dout + 192, 32, ws1); + neon_static_x8_t_f(dout, 128, ws + 80); } else if (N == 64) { - neon_static_x4_f(dout , 16, ws); - neon_static_x4_f(dout + 64, 16, ws); - neon_static_x4_f(dout + 96, 16, ws); - } + neon_static_x4_f(dout , ws); + neon_static_x4_f(dout + 64, ws); + neon_static_x4_f(dout + 96, ws); - neon_static_x8_t_f(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); + neon_static_x8_t_f(dout, 64, ws + 32); + } else { + assert(N == 32); + neon_static_x8_t_f(dout, 32, ws + 8); + } #else if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 0); @@ -1176,21 +1187,28 @@ ffts_static_transform_i_32f(ffts_plan_t *p, const void *in, void *out) ffts_static_rec_i_32f(p, dout + N1 + N2, N3); ffts_static_rec_i_32f(p, dout + N , N2); ffts_static_rec_i_32f(p, dout + N + N1 , N2); + + neon_static_x8_t_i(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); } else if (N == 128) { - const float *ws1 = ws + (p->ws_is[1] << 1); + neon_static_x8_i(dout, 32, ws + 8); + + neon_static_x4_i(dout + 64, ws); + neon_static_x4_i(dout + 96, ws); - neon_static_x8_i(dout , 32, ws1); - neon_static_x4_i(dout + 64, 16, ws); - neon_static_x4_i(dout + 96, 16, ws); - neon_static_x8_i(dout + 128, 32, ws1); - neon_static_x8_i(dout + 192, 32, ws1); + neon_static_x8_i(dout + 128, 32, ws + 8); + neon_static_x8_i(dout + 192, 32, ws + 8); + + neon_static_x8_t_i(dout, 128, ws + 80); } else if (N == 64) { - neon_static_x4_i(dout , 16, ws); - neon_static_x4_i(dout + 64, 16, ws); - neon_static_x4_i(dout + 96, 16, ws); - } + neon_static_x4_i(dout , ws); + neon_static_x4_i(dout + 64, ws); + neon_static_x4_i(dout + 96, ws); - neon_static_x8_t_i(dout, N, ws + (p->ws_is[N_log_2 - 4] << 1)); + neon_static_x8_t_i(dout, 64, ws + 32); + } else { + assert(N == 32); + neon_static_x8_t_i(dout, 32, ws + 8); + } #else 
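
(Aside: the dispatch being reworked in these hunks is the base-case
unrolling introduced two commits back and then trimmed to the reachable
sizes by the coverage commit. Instead of recursing all the way down to a
single 16-point butterfly, transforms of 128, 64 and 32 points are expanded
into direct kernel calls, which removes several levels of recursive call
overhead on the hot path. A minimal C sketch of the shape, in which rec,
x4 and x8 are hypothetical stand-ins for ffts_static_rec_f_32f and the
4- and 8-point butterfly kernels, and stage_ws is a hypothetical helper
for the twiddle lookup:)

#include <assert.h>
#include <stddef.h>
#include "ffts.h"   /* ffts_plan_t, ffts_ctzl */

static void x4(float *data, const float *ws);           /* hypothetical */
static void x8(float *data, size_t N, const float *ws); /* hypothetical */

static const float *stage_ws(const ffts_plan_t *p, size_t N)
{
    /* p->ws_is[k] << 1 is the float offset of the twiddles for the
     * N = 16 << k stage; the literal ws + 8, ws + 32 and ws + 80 in
     * the hunks above are this lookup precomputed for N = 32/64/128 */
    return (const float*) p->ws + (p->ws_is[ffts_ctzl(N) - 4] << 1);
}

static void rec(const ffts_plan_t *p, float *data, size_t N)
{
    const float *ws = (const float*) p->ws;

    if (N > 128) {
        /* generic case: five sub-transforms, then one combine pass */
        rec(p, data,             N / 4);
        rec(p, data + N/2,       N / 8);
        rec(p, data + N/2 + N/4, N / 8);
        rec(p, data + N,         N / 4);
        rec(p, data + N + N/2,   N / 4);
        x8(data, N, stage_ws(p, N));
    } else if (N == 128) {
        /* the five sub-transforms above, fully inlined */
        x8(data,       32, stage_ws(p, 32));
        x4(data +  64, ws);
        x4(data +  96, ws);
        x8(data + 128, 32, stage_ws(p, 32));
        x8(data + 192, 32, stage_ws(p, 32));
        x8(data, 128, stage_ws(p, 128));
    } else if (N == 64) {
        x4(data,      ws);
        x4(data + 64, ws);
        x4(data + 96, ws);
        x8(data, 64, stage_ws(p, 64));
    } else {
        assert(N == 32);  /* coverage showed N == 16 never reaches here */
        x8(data, 32, stage_ws(p, 32));
    }
}

(Note that x4 is called with the bare table after this commit, since the
size argument it used to take was never read.)
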
if (N_log_2 & 1) { ffts_static_firstpass_odd_32f(dout, din, p, 1); diff --git a/src/neon.h b/src/neon.h index b40623b..66dcd4b 100644 --- a/src/neon.h +++ b/src/neon.h @@ -50,13 +50,13 @@ void neon_transpose_to_buf(uint64_t *in, uint64_t *out, int w); void neon_static_e_f(ffts_plan_t*, const void*, void*); void neon_static_o_f(ffts_plan_t*, const void*, void*); -void neon_static_x4_f(float*, size_t, const float*); +void neon_static_x4_f(float*, const float*); void neon_static_x8_f(float*, size_t, const float*); void neon_static_x8_t_f(float*, size_t, const float*); void neon_static_e_i(ffts_plan_t*, const void*, void*); void neon_static_o_i(ffts_plan_t*, const void*, void*); -void neon_static_x4_i(float*, size_t, const float*); +void neon_static_x4_i(float*, const float*); void neon_static_x8_i(float*, size_t, const float*); void neon_static_x8_t_i(float*, size_t, const float*); diff --git a/src/neon_static_f.s b/src/neon_static_f.s index bb0d717..e1e9a4a 100644 --- a/src/neon_static_f.s +++ b/src/neon_static_f.s @@ -652,59 +652,57 @@ _neon_ee_o_loop2_exit: vldmia sp!, {d8-d15} pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x4_f + .globl _neon_static_x4_f _neon_static_x4_f: #else - .globl neon_static_x4_f + .globl neon_static_x4_f neon_static_x4_f: #endif -@ add r3, r0, #0 - push {r4, r5, r6, lr} - vstmdb sp!, {d8-d15} + add r3, r0, #64 + vpush {q4-q7} - vld1.32 {q8,q9}, [r0, :128] - add r4, r0, r1, lsl #1 - vld1.32 {q10,q11}, [r4, :128] - add r5, r0, r1, lsl #2 - vld1.32 {q12,q13}, [r5, :128] - add r6, r4, r1, lsl #2 - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q2,q3}, [r2, :128] - - vmul.f32 q0, q13, q3 - vmul.f32 q5, q12, q2 - vmul.f32 q1, q14, q2 - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vadd.f32 q2, q10, q13 @ - vsub.f32 q4, q8, q15 - vsub.f32 q3, q11, q12 @ - vst1.32 {q0,q1}, [r0, :128] - vsub.f32 q5, q9, q14 - vsub.f32 q6, q10, q13 @ - vadd.f32 q7, q11, q12 @ - vst1.32 {q2,q3}, [r4, :128] - vst1.32 {q4,q5}, [r5, :128] - vst1.32 {q6,q7}, [r6, :128] - vldmia sp!, {d8-d15} - pop {r4, r5, r6, pc} + vld1.32 {q2, q3}, [r1, :128] + vld1.32 {q12, q13}, [r3, :128]! + mov r2, r0 + vmul.f32 q0, q13, q3 + vld1.32 {q14, q15}, [r3, :128] + vmul.f32 q5, q12, q2 + vld1.32 {q8, q9}, [r0, :128]! + vmul.f32 q1, q14, q2 + vld1.32 {q10, q11}, [r0, :128] + vmul.f32 q4, q14, q3 + vmul.f32 q14, q12, q3 + vmul.f32 q13, q13, q2 + vmul.f32 q12, q15, q3 + vmul.f32 q2, q15, q2 + vsub.f32 q0, q5, q0 + vadd.f32 q13, q13, q14 + vadd.f32 q12, q12, q1 + vsub.f32 q1, q2, q4 + vadd.f32 q15, q0, q12 + vsub.f32 q12, q0, q12 + vadd.f32 q14, q13, q1 + vsub.f32 q13, q13, q1 + vadd.f32 q0, q8, q15 + vadd.f32 q1, q9, q14 + vadd.f32 q2, q10, q13 + vsub.f32 q4, q8, q15 + vsub.f32 q3, q11, q12 + + vst1.32 {q0, q1}, [r2, :128]! + + vsub.f32 q5, q9, q14 + vsub.f32 q6, q10, q13 + vadd.f32 q7, q11, q12 + vst1.32 {q2, q3}, [r2, :128]! + vst1.32 {q4, q5}, [r2, :128]! 
+ vst1.32 {q6, q7}, [r2, :128] + vpop {q4-q7} + bx lr .align 4 #ifdef __APPLE__ diff --git a/src/neon_static_i.s b/src/neon_static_i.s index 5edc908..d8f8d9c 100644 --- a/src/neon_static_i.s +++ b/src/neon_static_i.s @@ -651,59 +651,57 @@ _neon_ee_o_loop2_exit: vldmia sp!, {d8-d15} pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x4_i + .globl _neon_static_x4_i _neon_static_x4_i: #else - .globl neon_static_x4_i + .globl neon_static_x4_i neon_static_x4_i: #endif -@ add r3, r0, #0 - push {r4, r5, r6, lr} - vstmdb sp!, {d8-d15} + add r3, r0, #64 + vpush {q4-q7} - vld1.32 {q8,q9}, [r0, :128] - add r4, r0, r1, lsl #1 - vld1.32 {q10,q11}, [r4, :128] - add r5, r0, r1, lsl #2 - vld1.32 {q12,q13}, [r5, :128] - add r6, r4, r1, lsl #2 - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q2,q3}, [r2, :128] - - vmul.f32 q0, q13, q3 - vmul.f32 q5, q12, q2 - vmul.f32 q1, q14, q2 - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vsub.f32 q2, q10, q13 @ - vsub.f32 q4, q8, q15 - vadd.f32 q3, q11, q12 @ - vst1.32 {q0,q1}, [r0, :128] - vsub.f32 q5, q9, q14 - vadd.f32 q6, q10, q13 @ - vsub.f32 q7, q11, q12 @ - vst1.32 {q2,q3}, [r4, :128] - vst1.32 {q4,q5}, [r5, :128] - vst1.32 {q6,q7}, [r6, :128] - vldmia sp!, {d8-d15} - pop {r4, r5, r6, pc} + vld1.32 {q2, q3}, [r1, :128] + vld1.32 {q12, q13}, [r3, :128]! + mov r2, r0 + vmul.f32 q0, q13, q3 + vld1.32 {q14, q15}, [r3, :128] + vmul.f32 q5, q12, q2 + vld1.32 {q8, q9}, [r0, :128]! + vmul.f32 q1, q14, q2 + vld1.32 {q10, q11}, [r0, :128] + vmul.f32 q4, q14, q3 + vmul.f32 q14, q12, q3 + vmul.f32 q13, q13, q2 + vmul.f32 q12, q15, q3 + vmul.f32 q2, q15, q2 + vsub.f32 q0, q5, q0 + vadd.f32 q13, q13, q14 + vadd.f32 q12, q12, q1 + vsub.f32 q1, q2, q4 + vadd.f32 q15, q0, q12 + vsub.f32 q12, q0, q12 + vadd.f32 q14, q13, q1 + vsub.f32 q13, q13, q1 + vadd.f32 q0, q8, q15 + vadd.f32 q1, q9, q14 + vsub.f32 q2, q10, q13 + vsub.f32 q4, q8, q15 + vadd.f32 q3, q11, q12 + + vst1.32 {q0, q1}, [r2, :128]! + + vsub.f32 q5, q9, q14 + vadd.f32 q6, q10, q13 + vsub.f32 q7, q11, q12 + vst1.32 {q2, q3}, [r2, :128]! + vst1.32 {q4, q5}, [r2, :128]! + vst1.32 {q6, q7}, [r2, :128] + vpop {q4-q7} + bx lr .align 4 #ifdef __APPLE__ -- cgit v1.1 From 52beaeb452af124551dbc8bdb29e2cccfce4d2b7 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 16 Mar 2016 11:50:22 +0200 Subject: Clean and optimize ARM Neon static transform, 4-5% faster --- src/neon_static_f.s | 1681 +++++++++++++++++++++++++-------------------------- src/neon_static_i.s | 1680 +++++++++++++++++++++++++------------------------- 2 files changed, 1624 insertions(+), 1737 deletions(-) diff --git a/src/neon_static_f.s b/src/neon_static_f.s index e1e9a4a..452d8d4 100644 --- a/src/neon_static_f.s +++ b/src/neon_static_f.s @@ -1,663 +1,614 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2016, Jukka Ojanen +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ - .fpu neon + .fpu neon - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_e_f + .globl _neon_static_e_f _neon_static_e_f: #else - .globl neon_static_e_f + .globl neon_static_e_f neon_static_e_f: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - ldr lr, [r0, #40] @ this is p->N - add r3, r1, #0 - add r7, r1, lr - add r5, r7, lr - add r10, r5, lr - add r4, r10, lr - add r8, r4, lr - add r6, r8, lr - add r9, r6, lr - ldr r12, [r0] - add r1, r0, #0 - add r0, r2, #0 - ldr r2, [r1, #16] @ this is p->ee_ws - ldr r11, [r1, #28] @ this is p->i0 - - vld1.32 {d16, d17}, [r2, :128] -_neon_ee_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 @ - vadd.f32 d28, d4, d3 @ - vsub.f32 d30, d4, d3 @ - vsub.f32 d5, d19, d14 @ - vsub.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 @ - vadd.f32 d4, d18, d15 @ - vadd.f32 d13, d19, d14 @ - vsub.f32 d12, d18, d15 @ - vadd.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_loop - - ldr r11, [r1, #12] - vld2.32 {q9}, [r5, :128]! @tag2 - vld2.32 {q13}, [r3, :128]! @tag0 - vld2.32 {q12}, [r4, :128]! @tag1 - vld2.32 {q0}, [r7, :128]! @tag4 - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! @tag3 - vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vsub.f32 d9, d23, d20 @ - vadd.f32 d11, d23, d20 @ - vsub.f32 q8, q12, q8 - vadd.f32 d8, d22, d21 @ - vsub.f32 d10, d22, d21 @ - ldr r2, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r2, r0, r2, lsl #2 - vtrn.32 q8, q5 - add lr, r0, lr, lsl #2 - vswp d9,d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! @tag7 - vld2.32 {q15}, [r9, :128]! @tag6 - vld2.32 {q11}, [r8, :128]! @tag5 - vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vsub.f32 d13, d29, d24 @ - vadd.f32 q15, q13, q11 - vadd.f32 d12, d28, d25 @ - vadd.f32 d15, d29, d24 @ - vsub.f32 d14, d28, d25 @ - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! 
- vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vsub.f32 d3, d17, d20 @ - vadd.f32 d7, d17, d20 @ - vadd.f32 d2, d16, d21 @ - vsub.f32 d6, d16, d21 @ - vswp d1, d2 - vswp d5, d6 - vstmia r2!, {q0-q3} - - add r2, r7, #0 - add r7, r9, #0 - add r9, r2, #0 - add r2, r8, #0 - add r8, r10, #0 - add r10, r2, #0 - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_oo_loop_exit -_neon_oo_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r7, :128]! - vld2.32 {q13}, [r9, :128]! - vsub.f32 q2, q12, q11 - vadd.f32 d7, d19, d16 @ - vsub.f32 d3, d19, d16 @ - vsub.f32 d6, d18, d17 @ - vadd.f32 d2, d18, d17 @ - vld2.32 {q9}, [r8, :128]! - vld2.32 {q8}, [r10, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r2, [r12], #4 - vadd.f32 d15, d19, d16 @ - ldr lr, [r12], #4 - vsub.f32 d11, d19, d16 @ - vsub.f32 d14, d18, d17 @ - vadd.f32 d10, d18, d17 @ - add r2, r0, r2, lsl #2 - vtrn.32 q1, q3 - add lr, r0, lr, lsl #2 - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_loop -_neon_oo_loop_exit: - - - add r2, r3, #0 - add r3, r7, #0 - add r7, r2, #0 - add r2, r4, #0 - add r4, r8, #0 - add r8, r2, #0 - add r2, r5, #0 - add r5, r9, #0 - add r9, r2, #0 - add r2, r6, #0 - add r6, r10, #0 - add r10, r2, #0 - add r2, r9, #0 - add r9, r10, #0 - add r10, r2, #0 - ldr r2, [r1, #16] - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_ee_loop2_exit - - vld1.32 {d16, d17}, [r2, :128] -_neon_ee_loop2: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 @ - vadd.f32 d28, d4, d3 @ - vsub.f32 d30, d4, d3 @ - vsub.f32 d5, d19, d14 @ - vsub.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 @ - vadd.f32 d4, d18, d15 @ - vadd.f32 d13, d19, d14 @ - vsub.f32 d12, d18, d15 @ - vadd.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_loop2 -_neon_ee_loop2_exit: + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop + + ldr r11, [r0, #12] + vld2.32 {q9}, [r5, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q12}, [r4, :128]! + vld2.32 {q0}, [r7, :128]! + vsub.f32 q11, q13, q12 + vld2.32 {q8}, [r6, :128]! 
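
(Aside: the rewritten prologue above no longer derives each row pointer
from the previous one. A minimal C sketch of the difference, assuming
in stands for the data argument in r1 and N for the byte stride the
assembly keeps in lr:)

#include <stddef.h>

static unsigned char *rows_old[8], *rows_new[8];

static void row_setup(unsigned char *in, size_t N)
{
    /* old: a serial chain, each address waits on the previous add */
    rows_old[0] = in;
    for (int i = 1; i < 8; ++i)
        rows_old[i] = rows_old[i - 1] + N;

    /* new: each address comes from the base via a shifted offset,
     * e.g. "add r5, r1, lr, lsl #1", so the adds are independent
     * and can be issued back to back */
    rows_new[0] = in;
    rows_new[1] = in + N;
    rows_new[2] = in + 2 * N;          /* lr, lsl #1 */
    rows_new[4] = in + 4 * N;          /* lr, lsl #2 */
    rows_new[3] = rows_new[1] + 2 * N; /* in + 3N */
    rows_new[5] = rows_new[1] + 4 * N; /* in + 5N */
    rows_new[6] = rows_new[4] + 2 * N; /* in + 6N */
    rows_new[7] = rows_new[3] + 4 * N; /* in + 7N */
}

(The same commit also collapses the save/restore sequences into a single
push {r4-r12, lr} / vpush {q4-q7} pair, which preserves the identical
d8-d15 set the old vstmdb did.)
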
+ vadd.f32 q12, q13, q12 + vsub.f32 q10, q9, q8 + vadd.f32 q8, q9, q8 + vadd.f32 q9, q12, q8 + vsub.f32 d9, d23, d20 + vadd.f32 d11, d23, d20 + vsub.f32 q8, q12, q8 + vadd.f32 d8, d22, d21 + vsub.f32 d10, d22, d21 + ldr r3, [r12], #4 + vld1.32 {d20, d21}, [r11, :128] + ldr lr, [r12], #4 + vtrn.32 q9, q4 + add r3, r2, r3, lsl #2 + vtrn.32 q8, q5 + add lr, r2, lr, lsl #2 + vswp d9, d10 + vst1.32 {d8,d9,d10,d11}, [lr, :128]! + vld2.32 {q13}, [r10, :128]! + vld2.32 {q15}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vsub.f32 q14, q15, q13 + vsub.f32 q12, q0, q11 + vadd.f32 q11, q0, q11 + vadd.f32 q13, q15, q13 + vsub.f32 d13, d29, d24 + vadd.f32 q15, q13, q11 + vadd.f32 d12, d28, d25 + vadd.f32 d15, d29, d24 + vsub.f32 d14, d28, d25 + vtrn.32 q15, q6 + vsub.f32 q15, q13, q11 + vtrn.32 q15, q7 + vswp d13, d14 + vst1.32 {d12,d13,d14,d15}, [lr, :128]! + vtrn.32 q13, q14 + vtrn.32 q11, q12 + vmul.f32 d24, d26, d21 + vmul.f32 d28, d27, d20 + vmul.f32 d25, d26, d20 + vmul.f32 d26, d27, d21 + vmul.f32 d27, d22, d21 + vmul.f32 d30, d23, d20 + vmul.f32 d29, d23, d21 + vmul.f32 d22, d22, d20 + vsub.f32 d21, d28, d24 + vadd.f32 d20, d26, d25 + vadd.f32 d25, d30, d27 + vsub.f32 d24, d22, d29 + vadd.f32 q11, q12, q10 + ldr r11, [r0, #32] @ p->i1 + vsub.f32 q10, q12, q10 + vadd.f32 q0, q9, q11 + vsub.f32 q2, q9, q11 + vsub.f32 d3, d17, d20 + vadd.f32 d7, d17, d20 + vadd.f32 d2, d16, d21 + vsub.f32 d6, d16, d21 + cmp r11, #0 + vswp d1, d2 + vswp d5, d6 + vstmia r3!, {q0-q3} + beq _neon_ee_loop2_exit +_neon_oo_loop: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vadd.f32 d7, d19, d16 + vsub.f32 d3, d19, d16 + vsub.f32 d6, d18, d17 + vadd.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vadd.f32 d15, d19, d16 + ldr lr, [r12], #4 + vsub.f32 d11, d19, d16 + vsub.f32 d14, d18, d17 + vadd.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_oo_loop + + ldr r3, [r0, #16] @ p->ee_ws + ldr r11, [r0, #32] @ p->i1 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! 
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop2 +_neon_ee_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_o_f + .globl _neon_static_o_f _neon_static_o_f: #else - .globl neon_static_o_f + .globl neon_static_o_f neon_static_o_f: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - ldr lr, [r0, #40] @ this is p->N - add r3, r1, #0 - add r7, r1, lr - add r5, r7, lr - add r10, r5, lr - add r4, r10, lr - add r8, r4, lr - add r6, r8, lr - add r9, r6, lr - ldr r12, [r0] - add r1, r0, #0 - add r0, r2, #0 - ldr r2, [r1, #16] @ this is p->ee_ws - ldr r11, [r1, #28] @ this is p->i0 - - vld1.32 {d16, d17}, [r2, :128] + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] _neon_ee_o_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 @ - vadd.f32 d28, d4, d3 @ - vsub.f32 d30, d4, d3 @ - vsub.f32 d5, d19, d14 @ - vsub.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 @ - vadd.f32 d4, d18, d15 @ - vadd.f32 d13, d19, d14 @ - vsub.f32 d12, d18, d15 @ - vadd.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_o_loop - - add r2, r7, #0 - add r7, r9, #0 - add r9, r2, #0 - add r2, r8, #0 - add r8, r10, #0 - add r10, r2, #0 - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_oo_o_loop_exit + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop + + ldr r11, [r0, #32] @ p->i1 + cmp r11, #0 + beq _neon_oo_o_loop_exit _neon_oo_o_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r7, :128]! - vld2.32 {q13}, [r9, :128]! 
- vsub.f32 q2, q12, q11 - vadd.f32 d7, d19, d16 @ - vsub.f32 d3, d19, d16 @ - vsub.f32 d6, d18, d17 @ - vadd.f32 d2, d18, d17 @ - vld2.32 {q9}, [r8, :128]! - vld2.32 {q8}, [r10, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r2, [r12], #4 - vadd.f32 d15, d19, d16 @ - ldr lr, [r12], #4 - vsub.f32 d11, d19, d16 @ - vsub.f32 d14, d18, d17 @ - vadd.f32 d10, d18, d17 @ - add r2, r0, r2, lsl #2 - vtrn.32 q1, q3 - add lr, r0, lr, lsl #2 - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_o_loop -_neon_oo_o_loop_exit: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vadd.f32 d7, d19, d16 + vsub.f32 d3, d19, d16 + vsub.f32 d6, d18, d17 + vadd.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vadd.f32 d15, d19, d16 + ldr lr, [r12], #4 + vsub.f32 d11, d19, d16 + vsub.f32 d14, d18, d17 + vadd.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4,q5}, [r3, :128]! + vst2.32 {q6,q7}, [lr, :128]! + bne _neon_oo_o_loop - ldr r11, [r1, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vld2.32 {q15}, [r10, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r2, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r2, r0, r2, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r0, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vsub.f32 d25, d19, d16 @ - vadd.f32 d27, d19, d16 @ - vsub.f32 q1, q11, q10 - vadd.f32 d24, d18, d17 @ - vsub.f32 d26, d18, d17 @ - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r2, :128]! - vld2.32 {q0}, [r9, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vsub.f32 d29, d1, d30 @ - vadd.f32 d27, d1, d30 @ - vsub.f32 q3, q3, q1 - vadd.f32 d28, d0, d31 @ - vsub.f32 d26, d0, d31 @ - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r2, :128]! 
- vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - vsub.f32 d21, d30, d19 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vsub.f32 d11, d27, d16 @ - vadd.f32 d15, d27, d16 @ - vadd.f32 d10, d26, d17 @ - vsub.f32 d14, d26, d17 @ - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - - - add r2, r3, #0 - add r3, r7, #0 - add r7, r2, #0 - add r2, r4, #0 - add r4, r8, #0 - add r8, r2, #0 - add r2, r5, #0 - add r5, r9, #0 - add r9, r2, #0 - add r2, r6, #0 - add r6, r10, #0 - add r10, r2, #0 - add r2, r9, #0 - add r9, r10, #0 - add r10, r2, #0 - ldr r2, [r1, #16] - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_ee_o_loop2_exit - - vld1.32 {d16, d17}, [r2, :128] +_neon_oo_o_loop_exit: + ldr r11, [r0, #8] + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q15}, [r8, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r3, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r3, r2, r3, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r2, lr, lsl #2 + vadd.f32 q10, q10, q12 + vadd.f32 q0, q11, q10 + vsub.f32 d25, d19, d16 + vadd.f32 d27, d19, d16 + vsub.f32 q1, q11, q10 + vadd.f32 d24, d18, d17 + vsub.f32 d26, d18, d17 + vtrn.32 q0, q12 + vtrn.32 q1, q13 + vld1.32 {d24, d25}, [r11, :128] + vswp d1, d2 + vst1.32 {q0, q1}, [r3, :128]! + vld2.32 {q0}, [r7, :128]! + vadd.f32 q1, q0, q15 + vld2.32 {q13}, [r10, :128]! + vld2.32 {q14}, [r9, :128]! + vsub.f32 q15, q0, q15 + vsub.f32 q0, q14, q13 + vadd.f32 q3, q14, q13 + vadd.f32 q2, q3, q1 + vsub.f32 d29, d1, d30 + vadd.f32 d27, d1, d30 + vsub.f32 q3, q3, q1 + vadd.f32 d28, d0, d31 + vsub.f32 d26, d0, d31 + vtrn.32 q2, q14 + vtrn.32 q3, q13 + vswp d5, d6 + vst1.32 {q2, q3}, [r3, :128]! + vtrn.32 q11, q9 + vtrn.32 q10, q8 + vmul.f32 d20, d18, d25 + vmul.f32 d22, d19, d24 + vmul.f32 d21, d19, d25 + vmul.f32 d18, d18, d24 + vmul.f32 d19, d16, d25 + vmul.f32 d30, d17, d24 + vmul.f32 d23, d16, d24 + vmul.f32 d24, d17, d25 + vadd.f32 d17, d22, d20 + vsub.f32 d16, d18, d21 + ldr r3, [r0, #16] @ p->ee_ws + vsub.f32 d21, d30, d19 + ldr r11, [r0, #32] @ p->i1 + vadd.f32 d20, d24, d23 + vadd.f32 q9, q8, q10 + vsub.f32 q8, q8, q10 + vadd.f32 q4, q14, q9 + vsub.f32 q6, q14, q9 + vsub.f32 d11, d27, d16 + vadd.f32 d15, d27, d16 + vadd.f32 d10, d26, d17 + vsub.f32 d14, d26, d17 + cmp r11, #0 + vswp d9, d10 + vswp d13, d14 + vstmia lr!, {q4-q7} + beq _neon_ee_o_loop2_exit + + vld1.32 {d16, d17}, [r3, :128] _neon_ee_o_loop2: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 @ - vadd.f32 d28, d4, d3 @ - vsub.f32 d30, d4, d3 @ - vsub.f32 d5, d19, d14 @ - vsub.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 @ - vadd.f32 d4, d18, d15 @ - vadd.f32 d13, d19, d14 @ - vsub.f32 d12, d18, d15 @ - vadd.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_o_loop2 -_neon_ee_o_loop2_exit: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! 
+ bne _neon_ee_o_loop2 - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} +_neon_ee_o_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x4_f + .globl _neon_static_x4_f _neon_static_x4_f: #else - .globl neon_static_x4_f + .globl neon_static_x4_f neon_static_x4_f: #endif add r3, r0, #64 @@ -704,252 +655,244 @@ neon_static_x4_f: vpop {q4-q7} bx lr - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x8_f + .globl _neon_static_x8_f _neon_static_x8_f: #else - .globl neon_static_x8_f + .globl neon_static_x8_f neon_static_x8_f: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_loop: - vld1.32 {q2,q3}, [r12, :128]! - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vadd.f32 q4, q12, q15 @ - vsub.f32 q6, q12, q15 @ - vsub.f32 q5, q13, q14 @ - vadd.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vadd.f32 q2, q8, q10 @ - vsub.f32 q3, q9, q12 @ - vst1.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vadd.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst1.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst1.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vadd.f32 q2, q10, q15 @ - vsub.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst1.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vsub.f32 q6, q10, q15 @ - vst1.32 {q2,q3}, [r6, :128]! - vadd.f32 q7, q11, q14 @ - vst1.32 {q4,q5}, [r8, :128]! - vst1.32 {q6,q7}, [r10, :128]! 
- bne neon_x8_loop - - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} - - .align 4 + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vadd.f32 q4, q12, q15 + vsub.f32 q6, q12, q15 + vsub.f32 q5, q13, q14 + vadd.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vadd.f32 q2, q8, q10 + vsub.f32 q3, q9, q12 + vst1.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vadd.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst1.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vsub.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst1.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst1.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vadd.f32 q2, q10, q15 + vsub.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst1.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vsub.f32 q6, q10, q15 + vst1.32 {q2, q3}, [r5, :128]! + vadd.f32 q7, q11, q14 + vst1.32 {q4, q5}, [r7, :128]! + vst1.32 {q6, q7}, [r12, :128]! + bne neon_x8_loop + + vpop {q4-q7} + pop {r4-r8, pc} + + .align 4 #ifdef __APPLE__ - .globl _neon_static_x8_t_f + .globl _neon_static_x8_t_f _neon_static_x8_t_f: #else - .globl neon_static_x8_t_f + .globl neon_static_x8_t_f neon_static_x8_t_f: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_t_loop: - vld1.32 {q2,q3}, [r12, :128]! 
- vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vadd.f32 q4, q12, q15 @ - vsub.f32 q6, q12, q15 @ - vsub.f32 q5, q13, q14 @ - vadd.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vadd.f32 q2, q8, q10 @ - vsub.f32 q3, q9, q12 @ - vst2.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vadd.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst2.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst2.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vadd.f32 q2, q10, q15 @ - vsub.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst2.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vsub.f32 q6, q10, q15 @ - vst2.32 {q2,q3}, [r6, :128]! - vadd.f32 q7, q11, q14 @ - vst2.32 {q4,q5}, [r8, :128]! - vst2.32 {q6,q7}, [r10, :128]! - bne neon_x8_t_loop - - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} + push {r4-r8, lr} + vpush {q4-q7} + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_t_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! 
+ vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vadd.f32 q4, q12, q15 + vsub.f32 q6, q12, q15 + vsub.f32 q5, q13, q14 + vadd.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vadd.f32 q2, q8, q10 + vsub.f32 q3, q9, q12 + vst2.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vadd.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst2.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vsub.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst2.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst2.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vadd.f32 q2, q10, q15 + vsub.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst2.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vsub.f32 q6, q10, q15 + vst2.32 {q2, q3}, [r5, :128]! + vadd.f32 q7, q11, q14 + vst2.32 {q4, q5}, [r7, :128]! + vst2.32 {q6, q7}, [r12, :128]! + bne neon_x8_t_loop + vpop {q4-q7} + pop {r4-r8, pc} diff --git a/src/neon_static_i.s b/src/neon_static_i.s index d8f8d9c..20dd7c1 100644 --- a/src/neon_static_i.s +++ b/src/neon_static_i.s @@ -1,662 +1,614 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. 
BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2016, Jukka Ojanen +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - .fpu neon + .fpu neon - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_e_i + .globl _neon_static_e_i _neon_static_e_i: #else - .globl neon_static_e_i + .globl neon_static_e_i neon_static_e_i: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - ldr lr, [r0, #40] @ this is p->N - add r3, r1, #0 - add r7, r1, lr - add r5, r7, lr - add r10, r5, lr - add r4, r10, lr - add r8, r4, lr - add r6, r8, lr - add r9, r6, lr - ldr r12, [r0] - add r1, r0, #0 - add r0, r2, #0 - ldr r2, [r1, #16] @ this is p->ee_ws - ldr r11, [r1, #28] @ this is p->i0 - - vld1.32 {d16, d17}, [r2, :128] -_neon_ee_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 @ - vsub.f32 d28, d4, d3 @ - vadd.f32 d30, d4, d3 @ - vadd.f32 d5, d19, d14 @ - vadd.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 @ - vsub.f32 d4, d18, d15 @ - vsub.f32 d13, d19, d14 @ - vadd.f32 d12, d18, d15 @ - vsub.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_loop - - ldr r11, [r1, #12] - vld2.32 {q9}, [r5, :128]! @tag2 - vld2.32 {q13}, [r3, :128]! @tag0 - vld2.32 {q12}, [r4, :128]! @tag1 - vld2.32 {q0}, [r7, :128]! @tag4 - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! @tag3 - vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vadd.f32 d9, d23, d20 @ - vsub.f32 d11, d23, d20 @ - vsub.f32 q8, q12, q8 - vsub.f32 d8, d22, d21 @ - vadd.f32 d10, d22, d21 @ - ldr r2, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r2, r0, r2, lsl #2 - vtrn.32 q8, q5 - add lr, r0, lr, lsl #2 - vswp d9,d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! @tag7 - vld2.32 {q15}, [r9, :128]! @tag6 - vld2.32 {q11}, [r8, :128]! @tag5 - vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vadd.f32 d13, d29, d24 @ - vadd.f32 q15, q13, q11 - vsub.f32 d12, d28, d25 @ - vsub.f32 d15, d29, d24 @ - vadd.f32 d14, d28, d25 @ - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! - vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vadd.f32 d3, d17, d20 @ - vsub.f32 d7, d17, d20 @ - vsub.f32 d2, d16, d21 @ - vadd.f32 d6, d16, d21 @ - vswp d1, d2 - vswp d5, d6 - vstmia r2!, {q0-q3} - - add r2, r7, #0 - add r7, r9, #0 - add r9, r2, #0 - add r2, r8, #0 - add r8, r10, #0 - add r10, r2, #0 - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_oo_loop_exit -_neon_oo_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r7, :128]! - vld2.32 {q13}, [r9, :128]! 
- vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 @ - vadd.f32 d3, d19, d16 @ - vadd.f32 d6, d18, d17 @ - vsub.f32 d2, d18, d17 @ - vld2.32 {q9}, [r8, :128]! - vld2.32 {q8}, [r10, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r2, [r12], #4 - vsub.f32 d15, d19, d16 @ - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 @ - vadd.f32 d14, d18, d17 @ - vsub.f32 d10, d18, d17 @ - add r2, r0, r2, lsl #2 - vtrn.32 q1, q3 - add lr, r0, lr, lsl #2 - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_loop -_neon_oo_loop_exit: - - add r2, r3, #0 - add r3, r7, #0 - add r7, r2, #0 - add r2, r4, #0 - add r4, r8, #0 - add r8, r2, #0 - add r2, r5, #0 - add r5, r9, #0 - add r9, r2, #0 - add r2, r6, #0 - add r6, r10, #0 - add r10, r2, #0 - add r2, r9, #0 - add r9, r10, #0 - add r10, r2, #0 - ldr r2, [r1, #16] - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_ee_loop2_exit - - vld1.32 {d16, d17}, [r2, :128] -_neon_ee_loop2: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 @ - vsub.f32 d28, d4, d3 @ - vadd.f32 d30, d4, d3 @ - vadd.f32 d5, d19, d14 @ - vadd.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 @ - vsub.f32 d4, d18, d15 @ - vsub.f32 d13, d19, d14 @ - vadd.f32 d12, d18, d15 @ - vsub.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_loop2 -_neon_ee_loop2_exit: + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! 
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop + + ldr r11, [r0, #12] + vld2.32 {q9}, [r5, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q12}, [r4, :128]! + vld2.32 {q0}, [r7, :128]! + vsub.f32 q11, q13, q12 + vld2.32 {q8}, [r6, :128]! + vadd.f32 q12, q13, q12 + vsub.f32 q10, q9, q8 + vadd.f32 q8, q9, q8 + vadd.f32 q9, q12, q8 + vadd.f32 d9, d23, d20 + vsub.f32 d11, d23, d20 + vsub.f32 q8, q12, q8 + vsub.f32 d8, d22, d21 + vadd.f32 d10, d22, d21 + ldr r3, [r12], #4 + vld1.32 {d20, d21}, [r11, :128] + ldr lr, [r12], #4 + vtrn.32 q9, q4 + add r3, r2, r3, lsl #2 + vtrn.32 q8, q5 + add lr, r2, lr, lsl #2 + vswp d9, d10 + vst1.32 {d8,d9,d10,d11}, [lr, :128]! + vld2.32 {q13}, [r10, :128]! + vld2.32 {q15}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vsub.f32 q14, q15, q13 + vsub.f32 q12, q0, q11 + vadd.f32 q11, q0, q11 + vadd.f32 q13, q15, q13 + vadd.f32 d13, d29, d24 + vadd.f32 q15, q13, q11 + vsub.f32 d12, d28, d25 + vsub.f32 d15, d29, d24 + vadd.f32 d14, d28, d25 + vtrn.32 q15, q6 + vsub.f32 q15, q13, q11 + vtrn.32 q15, q7 + vswp d13, d14 + vst1.32 {d12,d13,d14,d15}, [lr, :128]! + vtrn.32 q13, q14 + vtrn.32 q11, q12 + vmul.f32 d24, d26, d21 + vmul.f32 d28, d27, d20 + vmul.f32 d25, d26, d20 + vmul.f32 d26, d27, d21 + vmul.f32 d27, d22, d21 + vmul.f32 d30, d23, d20 + vmul.f32 d29, d23, d21 + vmul.f32 d22, d22, d20 + vsub.f32 d21, d28, d24 + vadd.f32 d20, d26, d25 + vadd.f32 d25, d30, d27 + vsub.f32 d24, d22, d29 + vadd.f32 q11, q12, q10 + ldr r11, [r0, #32] @ p->i1 + vsub.f32 q10, q12, q10 + vadd.f32 q0, q9, q11 + vsub.f32 q2, q9, q11 + vadd.f32 d3, d17, d20 + vsub.f32 d7, d17, d20 + vsub.f32 d2, d16, d21 + vadd.f32 d6, d16, d21 + cmp r11, #0 + vswp d1, d2 + vswp d5, d6 + vstmia r3!, {q0-q3} + beq _neon_ee_loop2_exit +_neon_oo_loop: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! 
+ vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vsub.f32 d15, d19, d16 + ldr lr, [r12], #4 + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_oo_loop + + ldr r3, [r0, #16] @ p->ee_ws + ldr r11, [r0, #32] @ p->i1 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop2 +_neon_ee_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_o_i + .globl _neon_static_o_i _neon_static_o_i: #else - .globl neon_static_o_i + .globl neon_static_o_i neon_static_o_i: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - ldr lr, [r0, #40] @ this is p->N - add r3, r1, #0 - add r7, r1, lr - add r5, r7, lr - add r10, r5, lr - add r4, r10, lr - add r8, r4, lr - add r6, r8, lr - add r9, r6, lr - ldr r12, [r0] - add r1, r0, #0 - add r0, r2, #0 - ldr r2, [r1, #16] @ this is p->ee_ws - ldr r11, [r1, #28] @ this is p->i0 - - vld1.32 {d16, d17}, [r2, :128] + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] _neon_ee_o_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 @ - vsub.f32 d28, d4, d3 @ - vadd.f32 d30, d4, d3 @ - vadd.f32 d5, d19, d14 @ - vadd.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 @ - vsub.f32 d4, d18, d15 @ - vsub.f32 d13, d19, d14 @ - vadd.f32 d12, d18, d15 @ - vsub.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_o_loop - - add r2, r7, #0 - add r7, r9, #0 - add r9, r2, #0 - add r2, r8, #0 - add r8, r10, #0 - add r10, r2, #0 - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_oo_o_loop_exit + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop + + ldr r11, [r0, #32] @ p->i1 + cmp r11, #0 + beq _neon_oo_o_loop_exit _neon_oo_o_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r7, :128]! - vld2.32 {q13}, [r9, :128]! 
- vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 @ - vadd.f32 d3, d19, d16 @ - vadd.f32 d6, d18, d17 @ - vsub.f32 d2, d18, d17 @ - vld2.32 {q9}, [r8, :128]! - vld2.32 {q8}, [r10, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r2, [r12], #4 - vsub.f32 d15, d19, d16 @ - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 @ - vadd.f32 d14, d18, d17 @ - vsub.f32 d10, d18, d17 @ - add r2, r0, r2, lsl #2 - vtrn.32 q1, q3 - add lr, r0, lr, lsl #2 - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_o_loop -_neon_oo_o_loop_exit: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vsub.f32 d15, d19, d16 + ldr lr, [r12], #4 + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4,q5}, [r3, :128]! + vst2.32 {q6,q7}, [lr, :128]! + bne _neon_oo_o_loop - ldr r11, [r1, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vld2.32 {q15}, [r10, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r2, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r2, r0, r2, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r0, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vadd.f32 d25, d19, d16 @ - vsub.f32 d27, d19, d16 @ - vsub.f32 q1, q11, q10 - vsub.f32 d24, d18, d17 @ - vadd.f32 d26, d18, d17 @ - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r2, :128]! - vld2.32 {q0}, [r9, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vadd.f32 d29, d1, d30 @ - vsub.f32 d27, d1, d30 @ - vsub.f32 q3, q3, q1 - vsub.f32 d28, d0, d31 @ - vadd.f32 d26, d0, d31 @ - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r2, :128]! 
- vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - vsub.f32 d21, d30, d19 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vadd.f32 d11, d27, d16 @ - vsub.f32 d15, d27, d16 @ - vsub.f32 d10, d26, d17 @ - vadd.f32 d14, d26, d17 @ - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - - - add r2, r3, #0 - add r3, r7, #0 - add r7, r2, #0 - add r2, r4, #0 - add r4, r8, #0 - add r8, r2, #0 - add r2, r5, #0 - add r5, r9, #0 - add r9, r2, #0 - add r2, r6, #0 - add r6, r10, #0 - add r10, r2, #0 - add r2, r9, #0 - add r9, r10, #0 - add r10, r2, #0 - ldr r2, [r1, #16] - ldr r11, [r1, #32] @ this is p->i1 - cmp r11, #0 - beq _neon_ee_o_loop2_exit - - vld1.32 {d16, d17}, [r2, :128] +_neon_oo_o_loop_exit: + ldr r11, [r0, #8] + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q15}, [r8, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r3, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r3, r2, r3, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r2, lr, lsl #2 + vadd.f32 q10, q10, q12 + vadd.f32 q0, q11, q10 + vadd.f32 d25, d19, d16 + vsub.f32 d27, d19, d16 + vsub.f32 q1, q11, q10 + vsub.f32 d24, d18, d17 + vadd.f32 d26, d18, d17 + vtrn.32 q0, q12 + vtrn.32 q1, q13 + vld1.32 {d24, d25}, [r11, :128] + vswp d1, d2 + vst1.32 {q0, q1}, [r3, :128]! + vld2.32 {q0}, [r7, :128]! + vadd.f32 q1, q0, q15 + vld2.32 {q13}, [r10, :128]! + vld2.32 {q14}, [r9, :128]! + vsub.f32 q15, q0, q15 + vsub.f32 q0, q14, q13 + vadd.f32 q3, q14, q13 + vadd.f32 q2, q3, q1 + vadd.f32 d29, d1, d30 + vsub.f32 d27, d1, d30 + vsub.f32 q3, q3, q1 + vsub.f32 d28, d0, d31 + vadd.f32 d26, d0, d31 + vtrn.32 q2, q14 + vtrn.32 q3, q13 + vswp d5, d6 + vst1.32 {q2, q3}, [r3, :128]! + vtrn.32 q11, q9 + vtrn.32 q10, q8 + vmul.f32 d20, d18, d25 + vmul.f32 d22, d19, d24 + vmul.f32 d21, d19, d25 + vmul.f32 d18, d18, d24 + vmul.f32 d19, d16, d25 + vmul.f32 d30, d17, d24 + vmul.f32 d23, d16, d24 + vmul.f32 d24, d17, d25 + vadd.f32 d17, d22, d20 + vsub.f32 d16, d18, d21 + ldr r3, [r0, #16] @ p->ee_ws + vsub.f32 d21, d30, d19 + ldr r11, [r0, #32] @ p->i1 + vadd.f32 d20, d24, d23 + vadd.f32 q9, q8, q10 + vsub.f32 q8, q8, q10 + vadd.f32 q4, q14, q9 + vsub.f32 q6, q14, q9 + vadd.f32 d11, d27, d16 + vsub.f32 d15, d27, d16 + vsub.f32 d10, d26, d17 + vadd.f32 d14, d26, d17 + cmp r11, #0 + vswp d9, d10 + vswp d13, d14 + vstmia lr!, {q4-q7} + beq _neon_ee_o_loop2_exit + + vld1.32 {d16, d17}, [r3, :128] _neon_ee_o_loop2: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 @ - vsub.f32 d28, d4, d3 @ - vadd.f32 d30, d4, d3 @ - vadd.f32 d5, d19, d14 @ - vadd.f32 d7, d31, d26 @ - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 @ - vsub.f32 d4, d18, d15 @ - vsub.f32 d13, d19, d14 @ - vadd.f32 d12, d18, d15 @ - vsub.f32 d15, d31, d26 @ - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 @ - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_o_loop2 -_neon_ee_o_loop2_exit: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! 
+ bne _neon_ee_o_loop2 - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} +_neon_ee_o_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x4_i + .globl _neon_static_x4_i _neon_static_x4_i: #else - .globl neon_static_x4_i + .globl neon_static_x4_i neon_static_x4_i: #endif add r3, r0, #64 @@ -703,252 +655,244 @@ neon_static_x4_i: vpop {q4-q7} bx lr - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_static_x8_i + .globl _neon_static_x8_i _neon_static_x8_i: #else - .globl neon_static_x8_i + .globl neon_static_x8_i neon_static_x8_i: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_loop: - vld1.32 {q2,q3}, [r12, :128]! - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 @ - vadd.f32 q6, q12, q15 @ - vadd.f32 q5, q13, q14 @ - vsub.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 @ - vadd.f32 q3, q9, q12 @ - vst1.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vsub.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst1.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vadd.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst1.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 @ - vadd.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst1.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 @ - vst1.32 {q2,q3}, [r6, :128]! - vsub.f32 q7, q11, q14 @ - vst1.32 {q4,q5}, [r8, :128]! - vst1.32 {q6,q7}, [r10, :128]! 
- bne neon_x8_loop - - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} - - .align 4 + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst1.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst1.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst1.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst1.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst1.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst1.32 {q2, q3}, [r5, :128]! + vsub.f32 q7, q11, q14 + vst1.32 {q4, q5}, [r7, :128]! + vst1.32 {q6, q7}, [r12, :128]! + bne neon_x8_loop + + vpop {q4-q7} + pop {r4-r8, pc} + + .align 4 #ifdef __APPLE__ - .globl _neon_static_x8_t_i + .globl _neon_static_x8_t_i _neon_static_x8_t_i: #else - .globl neon_static_x8_t_i + .globl neon_static_x8_t_i neon_static_x8_t_i: #endif - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} - vstmdb sp!, {d8-d15} - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_t_loop: - vld1.32 {q2,q3}, [r12, :128]! 
- vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! - vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 @ - vadd.f32 q6, q12, q15 @ - vadd.f32 q5, q13, q14 @ - vsub.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 @ - vadd.f32 q3, q9, q12 @ - vst2.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vsub.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst2.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vadd.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst2.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 @ - vadd.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst2.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 @ - vst2.32 {q2,q3}, [r6, :128]! - vsub.f32 q7, q11, q14 @ - vst2.32 {q4,q5}, [r8, :128]! - vst2.32 {q6,q7}, [r10, :128]! - bne neon_x8_t_loop - - vldmia sp!, {d8-d15} - pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} + push {r4-r8, lr} + vpush {q4-q7} + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_t_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! 
+ vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst2.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12,:128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst2.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst2.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst2.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst2.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst2.32 {q2, q3}, [r5, :128]! + vsub.f32 q7, q11, q14 + vst2.32 {q4, q5}, [r7, :128]! + vst2.32 {q6, q7}, [r12,:128]! + bne neon_x8_t_loop + vpop {q4-q7} + pop {r4-r8, pc} -- cgit v1.1 From b982322767378e5a816e943deea8d8c14f50bf6f Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 17 Mar 2016 12:19:59 +0200 Subject: Combine neon_static_f.s and neon_static_i.s to neon_static.s --- CMakeLists.txt | 3 +- src/neon_static.s | 1762 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/neon_static_f.s | 898 -------------------------- src/neon_static_i.s | 898 -------------------------- 4 files changed, 1763 insertions(+), 1798 deletions(-) create mode 100644 src/neon_static.s delete mode 100644 src/neon_static_f.s delete mode 100644 src/neon_static_i.s diff --git a/CMakeLists.txt b/CMakeLists.txt index 5104962..58f402b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -367,8 +367,7 @@ if(ENABLE_NEON) if(DISABLE_DYNAMIC_CODE) list(APPEND FFTS_SOURCES - src/neon_static_f.s - src/neon_static_i.s + src/neon_static.s ) endif(DISABLE_DYNAMIC_CODE) diff --git a/src/neon_static.s b/src/neon_static.s new file mode 100644 index 0000000..e183a14 --- /dev/null +++ b/src/neon_static.s @@ -0,0 +1,1762 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2016, Jukka Ojanen +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + .fpu neon + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_e_f +_neon_static_e_f: +#else + .globl neon_static_e_f +neon_static_e_f: +#endif + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop + + ldr r11, [r0, #12] + vld2.32 {q9}, [r5, :128]! 
+ vld2.32 {q13}, [r1, :128]! + vld2.32 {q12}, [r4, :128]! + vld2.32 {q0}, [r7, :128]! + vsub.f32 q11, q13, q12 + vld2.32 {q8}, [r6, :128]! + vadd.f32 q12, q13, q12 + vsub.f32 q10, q9, q8 + vadd.f32 q8, q9, q8 + vadd.f32 q9, q12, q8 + vsub.f32 d9, d23, d20 + vadd.f32 d11, d23, d20 + vsub.f32 q8, q12, q8 + vadd.f32 d8, d22, d21 + vsub.f32 d10, d22, d21 + ldr r3, [r12], #4 + vld1.32 {d20, d21}, [r11, :128] + ldr lr, [r12], #4 + vtrn.32 q9, q4 + add r3, r2, r3, lsl #2 + vtrn.32 q8, q5 + add lr, r2, lr, lsl #2 + vswp d9, d10 + vst1.32 {d8,d9,d10,d11}, [lr, :128]! + vld2.32 {q13}, [r10, :128]! + vld2.32 {q15}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vsub.f32 q14, q15, q13 + vsub.f32 q12, q0, q11 + vadd.f32 q11, q0, q11 + vadd.f32 q13, q15, q13 + vsub.f32 d13, d29, d24 + vadd.f32 q15, q13, q11 + vadd.f32 d12, d28, d25 + vadd.f32 d15, d29, d24 + vsub.f32 d14, d28, d25 + vtrn.32 q15, q6 + vsub.f32 q15, q13, q11 + vtrn.32 q15, q7 + vswp d13, d14 + vst1.32 {d12,d13,d14,d15}, [lr, :128]! + vtrn.32 q13, q14 + vtrn.32 q11, q12 + vmul.f32 d24, d26, d21 + vmul.f32 d28, d27, d20 + vmul.f32 d25, d26, d20 + vmul.f32 d26, d27, d21 + vmul.f32 d27, d22, d21 + vmul.f32 d30, d23, d20 + vmul.f32 d29, d23, d21 + vmul.f32 d22, d22, d20 + vsub.f32 d21, d28, d24 + vadd.f32 d20, d26, d25 + vadd.f32 d25, d30, d27 + vsub.f32 d24, d22, d29 + vadd.f32 q11, q12, q10 + ldr r11, [r0, #32] @ p->i1 + vsub.f32 q10, q12, q10 + vadd.f32 q0, q9, q11 + vsub.f32 q2, q9, q11 + vsub.f32 d3, d17, d20 + vadd.f32 d7, d17, d20 + vadd.f32 d2, d16, d21 + vsub.f32 d6, d16, d21 + cmp r11, #0 + vswp d1, d2 + vswp d5, d6 + vstmia r3!, {q0-q3} + beq _neon_ee_loop2_exit + +_neon_oo_loop: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vadd.f32 d7, d19, d16 + vsub.f32 d3, d19, d16 + vsub.f32 d6, d18, d17 + vadd.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vadd.f32 d15, d19, d16 + ldr lr, [r12], #4 + vsub.f32 d11, d19, d16 + vsub.f32 d14, d18, d17 + vadd.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_oo_loop + + ldr r3, [r0, #16] @ p->ee_ws + ldr r11, [r0, #32] @ p->i1 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! 
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop2 + +_neon_ee_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_e_i +_neon_static_e_i: +#else + .globl neon_static_e_i +neon_static_e_i: +#endif + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop + + ldr r11, [r0, #12] + vld2.32 {q9}, [r5, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q12}, [r4, :128]! 
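+	@ neon_static_e_i is the inverse-sign twin of neon_static_e_f: the
+	@ instruction stream is identical except that vadd/vsub swap roles wherever
+	@ the conjugated twiddles flip a sign (e.g. vadd.f32 d9, d23, d20 below
+	@ versus vsub.f32 in the forward version).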
+ vld2.32 {q0}, [r7, :128]! + vsub.f32 q11, q13, q12 + vld2.32 {q8}, [r6, :128]! + vadd.f32 q12, q13, q12 + vsub.f32 q10, q9, q8 + vadd.f32 q8, q9, q8 + vadd.f32 q9, q12, q8 + vadd.f32 d9, d23, d20 + vsub.f32 d11, d23, d20 + vsub.f32 q8, q12, q8 + vsub.f32 d8, d22, d21 + vadd.f32 d10, d22, d21 + ldr r3, [r12], #4 + vld1.32 {d20, d21}, [r11, :128] + ldr lr, [r12], #4 + vtrn.32 q9, q4 + add r3, r2, r3, lsl #2 + vtrn.32 q8, q5 + add lr, r2, lr, lsl #2 + vswp d9, d10 + vst1.32 {d8,d9,d10,d11}, [lr, :128]! + vld2.32 {q13}, [r10, :128]! + vld2.32 {q15}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vsub.f32 q14, q15, q13 + vsub.f32 q12, q0, q11 + vadd.f32 q11, q0, q11 + vadd.f32 q13, q15, q13 + vadd.f32 d13, d29, d24 + vadd.f32 q15, q13, q11 + vsub.f32 d12, d28, d25 + vsub.f32 d15, d29, d24 + vadd.f32 d14, d28, d25 + vtrn.32 q15, q6 + vsub.f32 q15, q13, q11 + vtrn.32 q15, q7 + vswp d13, d14 + vst1.32 {d12,d13,d14,d15}, [lr, :128]! + vtrn.32 q13, q14 + vtrn.32 q11, q12 + vmul.f32 d24, d26, d21 + vmul.f32 d28, d27, d20 + vmul.f32 d25, d26, d20 + vmul.f32 d26, d27, d21 + vmul.f32 d27, d22, d21 + vmul.f32 d30, d23, d20 + vmul.f32 d29, d23, d21 + vmul.f32 d22, d22, d20 + vsub.f32 d21, d28, d24 + vadd.f32 d20, d26, d25 + vadd.f32 d25, d30, d27 + vsub.f32 d24, d22, d29 + vadd.f32 q11, q12, q10 + ldr r11, [r0, #32] @ p->i1 + vsub.f32 q10, q12, q10 + vadd.f32 q0, q9, q11 + vsub.f32 q2, q9, q11 + vadd.f32 d3, d17, d20 + vsub.f32 d7, d17, d20 + vsub.f32 d2, d16, d21 + vadd.f32 d6, d16, d21 + cmp r11, #0 + vswp d1, d2 + vswp d5, d6 + vstmia r3!, {q0-q3} + beq _neon_ee_loop2_exit + +_neon_oo_loop: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vsub.f32 d15, d19, d16 + ldr lr, [r12], #4 + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_oo_loop + + ldr r3, [r0, #16] @ p->ee_ws + ldr r11, [r0, #32] @ p->i1 + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! 
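+	@ Trip counts come from the plan: p->i0 (offset 28) drives the first even
+	@ loop, while p->i1 (offset 32) gates both the odd loop and this second
+	@ even loop, so a zero count branches straight to _neon_ee_loop2_exit.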
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_loop2 + +_neon_ee_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_o_f +_neon_static_o_f: +#else + .globl neon_static_o_f +neon_static_o_f: +#endif + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_o_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop + + ldr r11, [r0, #32] @ p->i1 + cmp r11, #0 + beq _neon_oo_o_loop_exit +_neon_oo_o_loop: + vld2.32 {q8}, [r6, :128]! 
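+	@ The odd-pass loop is multiplier-free: only vadd/vsub butterflies and
+	@ vtrn.32 transposes feed the interleaved vst2.32 stores.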
+ vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vadd.f32 d7, d19, d16 + vsub.f32 d3, d19, d16 + vsub.f32 d6, d18, d17 + vadd.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vadd.f32 d15, d19, d16 + ldr lr, [r12], #4 + vsub.f32 d11, d19, d16 + vsub.f32 d14, d18, d17 + vadd.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4,q5}, [r3, :128]! + vst2.32 {q6,q7}, [lr, :128]! + bne _neon_oo_o_loop + +_neon_oo_o_loop_exit: + ldr r11, [r0, #8] + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q15}, [r8, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r3, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r3, r2, r3, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r2, lr, lsl #2 + vadd.f32 q10, q10, q12 + vadd.f32 q0, q11, q10 + vsub.f32 d25, d19, d16 + vadd.f32 d27, d19, d16 + vsub.f32 q1, q11, q10 + vadd.f32 d24, d18, d17 + vsub.f32 d26, d18, d17 + vtrn.32 q0, q12 + vtrn.32 q1, q13 + vld1.32 {d24, d25}, [r11, :128] + vswp d1, d2 + vst1.32 {q0, q1}, [r3, :128]! + vld2.32 {q0}, [r7, :128]! + vadd.f32 q1, q0, q15 + vld2.32 {q13}, [r10, :128]! + vld2.32 {q14}, [r9, :128]! + vsub.f32 q15, q0, q15 + vsub.f32 q0, q14, q13 + vadd.f32 q3, q14, q13 + vadd.f32 q2, q3, q1 + vsub.f32 d29, d1, d30 + vadd.f32 d27, d1, d30 + vsub.f32 q3, q3, q1 + vadd.f32 d28, d0, d31 + vsub.f32 d26, d0, d31 + vtrn.32 q2, q14 + vtrn.32 q3, q13 + vswp d5, d6 + vst1.32 {q2, q3}, [r3, :128]! + vtrn.32 q11, q9 + vtrn.32 q10, q8 + vmul.f32 d20, d18, d25 + vmul.f32 d22, d19, d24 + vmul.f32 d21, d19, d25 + vmul.f32 d18, d18, d24 + vmul.f32 d19, d16, d25 + vmul.f32 d30, d17, d24 + vmul.f32 d23, d16, d24 + vmul.f32 d24, d17, d25 + vadd.f32 d17, d22, d20 + vsub.f32 d16, d18, d21 + ldr r3, [r0, #16] @ p->ee_ws + vsub.f32 d21, d30, d19 + ldr r11, [r0, #32] @ p->i1 + vadd.f32 d20, d24, d23 + vadd.f32 q9, q8, q10 + vsub.f32 q8, q8, q10 + vadd.f32 q4, q14, q9 + vsub.f32 q6, q14, q9 + vsub.f32 d11, d27, d16 + vadd.f32 d15, d27, d16 + vadd.f32 d10, d26, d17 + vsub.f32 d14, d26, d17 + cmp r11, #0 + vswp d9, d10 + vswp d13, d14 + vstmia lr!, {q4-q7} + beq _neon_ee_o_loop2_exit + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_o_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! 
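+	@ Data moves through vld2.32/vst2.32, which de-interleave and re-interleave
+	@ the re,im pairs: after a load each q register holds two complex values,
+	@ real parts in its even-numbered d half and imaginary parts in the odd.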
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vsub.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vsub.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop2 + +_neon_ee_o_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_o_i +_neon_static_o_i: +#else + .globl neon_static_o_i +neon_static_o_i: +#endif + push {r4-r12, lr} + vpush {q4-q7} + + ldr lr, [r0, #40] @ p->N + ldr r12, [r0 ] @ p->offsets + ldr r3, [r0, #16] @ p->ee_ws + + add r7, r1, lr + add r5, r1, lr, lsl #1 + add r4, r1, lr, lsl #2 + add r10, r7, lr, lsl #1 + add r8, r7, lr, lsl #2 + + ldr r11, [r0, #28] @ p->i0 + + add r6, r4, lr, lsl #1 + add r9, r10, lr, lsl #2 + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_o_loop: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r1, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop + + ldr r11, [r0, #32] @ p->i1 + cmp r11, #0 + beq _neon_oo_o_loop_exit +_neon_oo_o_loop: + vld2.32 {q8}, [r6, :128]! 
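+	@ neon_static_o_i tracks neon_static_o_f instruction for instruction, with
+	@ the sign-carrying pairs exchanged for the inverse direction (vsub/vadd on
+	@ d7/d3 below versus vadd/vsub in the forward kernel).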
+ vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r9, :128]! + vld2.32 {q13}, [r7, :128]! + vsub.f32 q2, q12, q11 + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 + vld2.32 {q9}, [r10, :128]! + vld2.32 {q8}, [r8, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r3, [r12], #4 + vsub.f32 d15, d19, d16 + ldr lr, [r12], #4 + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 + add r3, r2, r3, lsl #2 + vtrn.32 q1, q3 + add lr, r2, lr, lsl #2 + vst2.32 {q0,q1}, [r3, :128]! + vst2.32 {q2,q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4,q5}, [r3, :128]! + vst2.32 {q6,q7}, [lr, :128]! + bne _neon_oo_o_loop + +_neon_oo_o_loop_exit: + ldr r11, [r0, #8] + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q15}, [r8, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r3, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r3, r2, r3, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r2, lr, lsl #2 + vadd.f32 q10, q10, q12 + vadd.f32 q0, q11, q10 + vadd.f32 d25, d19, d16 + vsub.f32 d27, d19, d16 + vsub.f32 q1, q11, q10 + vsub.f32 d24, d18, d17 + vadd.f32 d26, d18, d17 + vtrn.32 q0, q12 + vtrn.32 q1, q13 + vld1.32 {d24, d25}, [r11, :128] + vswp d1, d2 + vst1.32 {q0, q1}, [r3, :128]! + vld2.32 {q0}, [r7, :128]! + vadd.f32 q1, q0, q15 + vld2.32 {q13}, [r10, :128]! + vld2.32 {q14}, [r9, :128]! + vsub.f32 q15, q0, q15 + vsub.f32 q0, q14, q13 + vadd.f32 q3, q14, q13 + vadd.f32 q2, q3, q1 + vadd.f32 d29, d1, d30 + vsub.f32 d27, d1, d30 + vsub.f32 q3, q3, q1 + vsub.f32 d28, d0, d31 + vadd.f32 d26, d0, d31 + vtrn.32 q2, q14 + vtrn.32 q3, q13 + vswp d5, d6 + vst1.32 {q2, q3}, [r3, :128]! + vtrn.32 q11, q9 + vtrn.32 q10, q8 + vmul.f32 d20, d18, d25 + vmul.f32 d22, d19, d24 + vmul.f32 d21, d19, d25 + vmul.f32 d18, d18, d24 + vmul.f32 d19, d16, d25 + vmul.f32 d30, d17, d24 + vmul.f32 d23, d16, d24 + vmul.f32 d24, d17, d25 + vadd.f32 d17, d22, d20 + vsub.f32 d16, d18, d21 + ldr r3, [r0, #16] @ p->ee_ws + vsub.f32 d21, d30, d19 + ldr r11, [r0, #32] @ p->i1 + vadd.f32 d20, d24, d23 + vadd.f32 q9, q8, q10 + vsub.f32 q8, q8, q10 + vadd.f32 q4, q14, q9 + vsub.f32 q6, q14, q9 + vadd.f32 d11, d27, d16 + vsub.f32 d15, d27, d16 + vsub.f32 d10, d26, d17 + vadd.f32 d14, d26, d17 + cmp r11, #0 + vswp d9, d10 + vswp d13, d14 + vstmia lr!, {q4-q7} + beq _neon_ee_o_loop2_exit + + vld1.32 {d16, d17}, [r3, :128] +_neon_ee_o_loop2: + vld2.32 {q15}, [r5, :128]! + vld2.32 {q13}, [r4, :128]! + vld2.32 {q14}, [r1, :128]! + vld2.32 {q9}, [r10, :128]! + vld2.32 {q10}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vld2.32 {q12}, [r7, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r6, :128]! 
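+	@ Every iteration pops two destination offsets from p->offsets via r12 and
+	@ scales them with lsl #2 into byte addresses off the base pointer in r2.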
+ subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r3, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r3, r2, r3, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r2, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r3, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r3, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne _neon_ee_o_loop2 + +_neon_ee_o_loop2_exit: + vpop {q4-q7} + pop {r4-r12, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x4_f +_neon_static_x4_f: +#else + .globl neon_static_x4_f +neon_static_x4_f: +#endif + add r3, r0, #64 + vpush {q4-q7} + + vld1.32 {q2, q3}, [r1, :128] + vld1.32 {q12, q13}, [r3, :128]! + mov r2, r0 + vmul.f32 q0, q13, q3 + vld1.32 {q14, q15}, [r3, :128] + vmul.f32 q5, q12, q2 + vld1.32 {q8, q9}, [r0, :128]! + vmul.f32 q1, q14, q2 + vld1.32 {q10, q11}, [r0, :128] + vmul.f32 q4, q14, q3 + vmul.f32 q14, q12, q3 + vmul.f32 q13, q13, q2 + vmul.f32 q12, q15, q3 + vmul.f32 q2, q15, q2 + vsub.f32 q0, q5, q0 + vadd.f32 q13, q13, q14 + vadd.f32 q12, q12, q1 + vsub.f32 q1, q2, q4 + vadd.f32 q15, q0, q12 + vsub.f32 q12, q0, q12 + vadd.f32 q14, q13, q1 + vsub.f32 q13, q13, q1 + vadd.f32 q0, q8, q15 + vadd.f32 q1, q9, q14 + vadd.f32 q2, q10, q13 + vsub.f32 q4, q8, q15 + vsub.f32 q3, q11, q12 + + vst1.32 {q0, q1}, [r2, :128]! + + vsub.f32 q5, q9, q14 + vsub.f32 q6, q10, q13 + vadd.f32 q7, q11, q12 + + vst1.32 {q2, q3}, [r2, :128]! + vst1.32 {q4, q5}, [r2, :128]! + vst1.32 {q6, q7}, [r2, :128] + + vpop {q4-q7} + bx lr + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x4_i +_neon_static_x4_i: +#else + .globl neon_static_x4_i +neon_static_x4_i: +#endif + add r3, r0, #64 + vpush {q4-q7} + + vld1.32 {q2, q3}, [r1, :128] + vld1.32 {q12, q13}, [r3, :128]! + mov r2, r0 + vmul.f32 q0, q13, q3 + vld1.32 {q14, q15}, [r3, :128] + vmul.f32 q5, q12, q2 + vld1.32 {q8, q9}, [r0, :128]! + vmul.f32 q1, q14, q2 + vld1.32 {q10, q11}, [r0, :128] + vmul.f32 q4, q14, q3 + vmul.f32 q14, q12, q3 + vmul.f32 q13, q13, q2 + vmul.f32 q12, q15, q3 + vmul.f32 q2, q15, q2 + vsub.f32 q0, q5, q0 + vadd.f32 q13, q13, q14 + vadd.f32 q12, q12, q1 + vsub.f32 q1, q2, q4 + vadd.f32 q15, q0, q12 + vsub.f32 q12, q0, q12 + vadd.f32 q14, q13, q1 + vsub.f32 q13, q13, q1 + vadd.f32 q0, q8, q15 + vadd.f32 q1, q9, q14 + vsub.f32 q2, q10, q13 + vsub.f32 q4, q8, q15 + vadd.f32 q3, q11, q12 + + vst1.32 {q0, q1}, [r2, :128]! + + vsub.f32 q5, q9, q14 + vadd.f32 q6, q10, q13 + vsub.f32 q7, q11, q12 + + vst1.32 {q2, q3}, [r2, :128]! + vst1.32 {q4, q5}, [r2, :128]! 
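+	@ x4 performs one radix-4 pass over 16 complex floats (q8-q15), twiddling
+	@ the upper half with the factors loaded from r1; since r2 was set from r0,
+	@ the results overwrite the input block in place.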
+ vst1.32 {q6, q7}, [r2, :128] + + vpop {q4-q7} + bx lr + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x8_f +_neon_static_x8_f: +#else + .globl neon_static_x8_f +neon_static_x8_f: +#endif + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vadd.f32 q4, q12, q15 + vsub.f32 q6, q12, q15 + vsub.f32 q5, q13, q14 + vadd.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vadd.f32 q2, q8, q10 + vsub.f32 q3, q9, q12 + vst1.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vadd.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst1.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vsub.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst1.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst1.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vadd.f32 q2, q10, q15 + vsub.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst1.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vsub.f32 q6, q10, q15 + vst1.32 {q2, q3}, [r5, :128]! + vadd.f32 q7, q11, q14 + vst1.32 {q4, q5}, [r7, :128]! + vst1.32 {q6, q7}, [r12, :128]! + bne neon_x8_loop + + vpop {q4-q7} + pop {r4-r8, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x8_i +_neon_static_x8_i: +#else + .globl neon_static_x8_i +neon_static_x8_i: +#endif + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_loop: + vld1.32 {q2, q3}, [r2, :128]! 
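+	@ Here r1, which built the seven row pointers above, is recycled as the
+	@ byte counter: each pass handles 32 bytes per row and pulls three 32-byte
+	@ twiddle blocks from r2 before looping.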
+ subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst1.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst1.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst1.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst1.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst1.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst1.32 {q2, q3}, [r5, :128]! + vsub.f32 q7, q11, q14 + vst1.32 {q4, q5}, [r7, :128]! + vst1.32 {q6, q7}, [r12, :128]! + bne neon_x8_loop + + vpop {q4-q7} + pop {r4-r8, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x8_t_f +_neon_static_x8_t_f: +#else + .globl neon_static_x8_t_f +neon_static_x8_t_f: +#endif + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_t_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! 
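+	@ The _t variants repeat the x8 kernel but emit the final results with
+	@ vst2.32 instead of vst1.32, element-interleaving each output register
+	@ pair (presumably to transpose the ordering for the next stage).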
+ vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vadd.f32 q4, q12, q15 + vsub.f32 q6, q12, q15 + vsub.f32 q5, q13, q14 + vadd.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vadd.f32 q2, q8, q10 + vsub.f32 q3, q9, q12 + vst2.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12, :128] + vadd.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst2.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vsub.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst2.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst2.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vadd.f32 q2, q10, q15 + vsub.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst2.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vsub.f32 q6, q10, q15 + vst2.32 {q2, q3}, [r5, :128]! + vadd.f32 q7, q11, q14 + vst2.32 {q4, q5}, [r7, :128]! + vst2.32 {q6, q7}, [r12, :128]! + bne neon_x8_t_loop + + vpop {q4-q7} + pop {r4-r8, pc} + + .align 4 +#ifdef __APPLE__ + .globl _neon_static_x8_t_i +_neon_static_x8_t_i: +#else + .globl neon_static_x8_t_i +neon_static_x8_t_i: +#endif + push {r4-r8, lr} + vpush {q4-q7} + + add r4, r0, r1, lsl #1 @ data2 + add r3, r0, r1 @ data1 + add r6, r4, r1, lsl #1 @ data4 + add r5, r4, r1 @ data3 + add r8, r6, r1, lsl #1 @ data6 + add r7, r6, r1 @ data5 + add r12, r8, r1 @ data7 + +neon_x8_t_loop: + vld1.32 {q2, q3}, [r2, :128]! + subs r1, r1, #32 + vld1.32 {q14, q15}, [r5, :128] + vmul.f32 q12, q15, q2 + vld1.32 {q10, q11}, [r4, :128] + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vsub.f32 q10, q12, q8 + vld1.32 {q2, q3}, [r2, :128]! 
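+	@ As elsewhere, this trailing _i kernel differs from its _f twin only in
+	@ the sign-sensitive combines: compare the vsub/vadd on q4/q6 just below
+	@ with the vadd/vsub pair in neon_static_x8_t_f above.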
+ vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vsub.f32 q9, q1, q14 + vld1.32 {q12, q13}, [r3, :128] + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r8, :128] + vld1.32 {q12, q13}, [r6, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r3, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r5, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r0, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst2.32 {q0, q1}, [r0, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r12,:128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r7, :128] + vst2.32 {q2, q3}, [r4, :128]! + vld1.32 {q2, q3}, [r2, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst2.32 {q4, q5}, [r6, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst2.32 {q6, q7}, [r8, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r3, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r5, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst2.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst2.32 {q2, q3}, [r5, :128]! + vsub.f32 q7, q11, q14 + vst2.32 {q4, q5}, [r7, :128]! + vst2.32 {q6, q7}, [r12,:128]! + bne neon_x8_t_loop + + vpop {q4-q7} + pop {r4-r8, pc} diff --git a/src/neon_static_f.s b/src/neon_static_f.s deleted file mode 100644 index 452d8d4..0000000 --- a/src/neon_static_f.s +++ /dev/null @@ -1,898 +0,0 @@ -/* - -This file is part of FFTS -- The Fastest Fourier Transform in the South - -Copyright (c) 2016, Jukka Ojanen -Copyright (c) 2012, Anthony M. Blake -Copyright (c) 2012, The University of Waikato - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of the organization nor the -names of its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANTHONY M. 
BLAKE BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - .fpu neon - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_e_f -_neon_static_e_f: -#else - .globl neon_static_e_f -neon_static_e_f: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop - - ldr r11, [r0, #12] - vld2.32 {q9}, [r5, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q12}, [r4, :128]! - vld2.32 {q0}, [r7, :128]! - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! - vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vsub.f32 d9, d23, d20 - vadd.f32 d11, d23, d20 - vsub.f32 q8, q12, q8 - vadd.f32 d8, d22, d21 - vsub.f32 d10, d22, d21 - ldr r3, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r3, r2, r3, lsl #2 - vtrn.32 q8, q5 - add lr, r2, lr, lsl #2 - vswp d9, d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! - vld2.32 {q15}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! 
- vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vsub.f32 d13, d29, d24 - vadd.f32 q15, q13, q11 - vadd.f32 d12, d28, d25 - vadd.f32 d15, d29, d24 - vsub.f32 d14, d28, d25 - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! - vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 - ldr r11, [r0, #32] @ p->i1 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vsub.f32 d3, d17, d20 - vadd.f32 d7, d17, d20 - vadd.f32 d2, d16, d21 - vsub.f32 d6, d16, d21 - cmp r11, #0 - vswp d1, d2 - vswp d5, d6 - vstmia r3!, {q0-q3} - beq _neon_ee_loop2_exit - -_neon_oo_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vadd.f32 d7, d19, d16 - vsub.f32 d3, d19, d16 - vsub.f32 d6, d18, d17 - vadd.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vadd.f32 d15, d19, d16 - ldr lr, [r12], #4 - vsub.f32 d11, d19, d16 - vsub.f32 d14, d18, d17 - vadd.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_oo_loop - - ldr r3, [r0, #16] @ p->ee_ws - ldr r11, [r0, #32] @ p->i1 - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop2: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! 
- vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop2 - -_neon_ee_loop2_exit: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_o_f -_neon_static_o_f: -#else - .globl neon_static_o_f -neon_static_o_f: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop - - ldr r11, [r0, #32] @ p->i1 - cmp r11, #0 - beq _neon_oo_o_loop_exit -_neon_oo_o_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vadd.f32 d7, d19, d16 - vsub.f32 d3, d19, d16 - vsub.f32 d6, d18, d17 - vadd.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vadd.f32 d15, d19, d16 - ldr lr, [r12], #4 - vsub.f32 d11, d19, d16 - vsub.f32 d14, d18, d17 - vadd.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r3, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_o_loop - -_neon_oo_o_loop_exit: - ldr r11, [r0, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! 
- vld2.32 {q13}, [r1, :128]! - vld2.32 {q15}, [r8, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r3, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r3, r2, r3, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r2, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vsub.f32 d25, d19, d16 - vadd.f32 d27, d19, d16 - vsub.f32 q1, q11, q10 - vadd.f32 d24, d18, d17 - vsub.f32 d26, d18, d17 - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r3, :128]! - vld2.32 {q0}, [r7, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r10, :128]! - vld2.32 {q14}, [r9, :128]! - vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vsub.f32 d29, d1, d30 - vadd.f32 d27, d1, d30 - vsub.f32 q3, q3, q1 - vadd.f32 d28, d0, d31 - vsub.f32 d26, d0, d31 - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r3, :128]! - vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - ldr r3, [r0, #16] @ p->ee_ws - vsub.f32 d21, d30, d19 - ldr r11, [r0, #32] @ p->i1 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vsub.f32 d11, d27, d16 - vadd.f32 d15, d27, d16 - vadd.f32 d10, d26, d17 - vsub.f32 d14, d26, d17 - cmp r11, #0 - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - beq _neon_ee_o_loop2_exit - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop2: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! 
- bne _neon_ee_o_loop2 - -_neon_ee_o_loop2_exit: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x4_f -_neon_static_x4_f: -#else - .globl neon_static_x4_f -neon_static_x4_f: -#endif - add r3, r0, #64 - vpush {q4-q7} - - vld1.32 {q2, q3}, [r1, :128] - vld1.32 {q12, q13}, [r3, :128]! - mov r2, r0 - vmul.f32 q0, q13, q3 - vld1.32 {q14, q15}, [r3, :128] - vmul.f32 q5, q12, q2 - vld1.32 {q8, q9}, [r0, :128]! - vmul.f32 q1, q14, q2 - vld1.32 {q10, q11}, [r0, :128] - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vadd.f32 q2, q10, q13 - vsub.f32 q4, q8, q15 - vsub.f32 q3, q11, q12 - - vst1.32 {q0, q1}, [r2, :128]! - - vsub.f32 q5, q9, q14 - vsub.f32 q6, q10, q13 - vadd.f32 q7, q11, q12 - - vst1.32 {q2, q3}, [r2, :128]! - vst1.32 {q4, q5}, [r2, :128]! - vst1.32 {q6, q7}, [r2, :128] - - vpop {q4-q7} - bx lr - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x8_f -_neon_static_x8_f: -#else - .globl neon_static_x8_f -neon_static_x8_f: -#endif - push {r4-r8, lr} - vpush {q4-q7} - - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 - -neon_x8_loop: - vld1.32 {q2, q3}, [r2, :128]! - subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vadd.f32 q4, q12, q15 - vsub.f32 q6, q12, q15 - vsub.f32 q5, q13, q14 - vadd.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vadd.f32 q2, q8, q10 - vsub.f32 q3, q9, q12 - vst1.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12, :128] - vadd.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst1.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vsub.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst1.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6, q7}, [r8, :128]! 
- vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vadd.f32 q2, q10, q15 - vsub.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst1.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vsub.f32 q6, q10, q15 - vst1.32 {q2, q3}, [r5, :128]! - vadd.f32 q7, q11, q14 - vst1.32 {q4, q5}, [r7, :128]! - vst1.32 {q6, q7}, [r12, :128]! - bne neon_x8_loop - - vpop {q4-q7} - pop {r4-r8, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x8_t_f -_neon_static_x8_t_f: -#else - .globl neon_static_x8_t_f -neon_static_x8_t_f: -#endif - push {r4-r8, lr} - vpush {q4-q7} - - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 - -neon_x8_t_loop: - vld1.32 {q2, q3}, [r2, :128]! - subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vadd.f32 q4, q12, q15 - vsub.f32 q6, q12, q15 - vsub.f32 q5, q13, q14 - vadd.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vadd.f32 q2, q8, q10 - vsub.f32 q3, q9, q12 - vst2.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12, :128] - vadd.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst2.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vsub.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst2.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6, q7}, [r8, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vadd.f32 q2, q10, q15 - vsub.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst2.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vsub.f32 q6, q10, q15 - vst2.32 {q2, q3}, [r5, :128]! - vadd.f32 q7, q11, q14 - vst2.32 {q4, q5}, [r7, :128]! 
- vst2.32 {q6, q7}, [r12, :128]! - bne neon_x8_t_loop - - vpop {q4-q7} - pop {r4-r8, pc} diff --git a/src/neon_static_i.s b/src/neon_static_i.s deleted file mode 100644 index 20dd7c1..0000000 --- a/src/neon_static_i.s +++ /dev/null @@ -1,898 +0,0 @@ -/* - -This file is part of FFTS -- The Fastest Fourier Transform in the South - -Copyright (c) 2016, Jukka Ojanen -Copyright (c) 2012, Anthony M. Blake -Copyright (c) 2012, The University of Waikato - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of the organization nor the -names of its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -*/ - .fpu neon - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_e_i -_neon_static_e_i: -#else - .globl neon_static_e_i -neon_static_e_i: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop - - ldr r11, [r0, #12] - vld2.32 {q9}, [r5, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q12}, [r4, :128]! - vld2.32 {q0}, [r7, :128]! - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! - vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vadd.f32 d9, d23, d20 - vsub.f32 d11, d23, d20 - vsub.f32 q8, q12, q8 - vsub.f32 d8, d22, d21 - vadd.f32 d10, d22, d21 - ldr r3, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r3, r2, r3, lsl #2 - vtrn.32 q8, q5 - add lr, r2, lr, lsl #2 - vswp d9, d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! - vld2.32 {q15}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vadd.f32 d13, d29, d24 - vadd.f32 q15, q13, q11 - vsub.f32 d12, d28, d25 - vsub.f32 d15, d29, d24 - vadd.f32 d14, d28, d25 - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! - vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 - ldr r11, [r0, #32] @ p->i1 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vadd.f32 d3, d17, d20 - vsub.f32 d7, d17, d20 - vsub.f32 d2, d16, d21 - vadd.f32 d6, d16, d21 - cmp r11, #0 - vswp d1, d2 - vswp d5, d6 - vstmia r3!, {q0-q3} - beq _neon_ee_loop2_exit - -_neon_oo_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 - vadd.f32 d3, d19, d16 - vadd.f32 d6, d18, d17 - vsub.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! 
- vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vsub.f32 d15, d19, d16 - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 - vadd.f32 d14, d18, d17 - vsub.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_oo_loop - - ldr r3, [r0, #16] @ p->ee_ws - ldr r11, [r0, #32] @ p->i1 - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop2: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop2 - -_neon_ee_loop2_exit: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_o_i -_neon_static_o_i: -#else - .globl neon_static_o_i -neon_static_o_i: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop - - ldr r11, [r0, #32] @ p->i1 - cmp r11, #0 - beq _neon_oo_o_loop_exit -_neon_oo_o_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 - vadd.f32 d3, d19, d16 - vadd.f32 d6, d18, d17 - vsub.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vsub.f32 d15, d19, d16 - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 - vadd.f32 d14, d18, d17 - vsub.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r3, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_o_loop - -_neon_oo_o_loop_exit: - ldr r11, [r0, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q15}, [r8, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r3, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r3, r2, r3, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r2, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vadd.f32 d25, d19, d16 - vsub.f32 d27, d19, d16 - vsub.f32 q1, q11, q10 - vsub.f32 d24, d18, d17 - vadd.f32 d26, d18, d17 - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r3, :128]! - vld2.32 {q0}, [r7, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r10, :128]! - vld2.32 {q14}, [r9, :128]! 
- vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vadd.f32 d29, d1, d30 - vsub.f32 d27, d1, d30 - vsub.f32 q3, q3, q1 - vsub.f32 d28, d0, d31 - vadd.f32 d26, d0, d31 - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r3, :128]! - vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - ldr r3, [r0, #16] @ p->ee_ws - vsub.f32 d21, d30, d19 - ldr r11, [r0, #32] @ p->i1 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vadd.f32 d11, d27, d16 - vsub.f32 d15, d27, d16 - vsub.f32 d10, d26, d17 - vadd.f32 d14, d26, d17 - cmp r11, #0 - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - beq _neon_ee_o_loop2_exit - - vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop2: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop2 - -_neon_ee_o_loop2_exit: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x4_i -_neon_static_x4_i: -#else - .globl neon_static_x4_i -neon_static_x4_i: -#endif - add r3, r0, #64 - vpush {q4-q7} - - vld1.32 {q2, q3}, [r1, :128] - vld1.32 {q12, q13}, [r3, :128]! - mov r2, r0 - vmul.f32 q0, q13, q3 - vld1.32 {q14, q15}, [r3, :128] - vmul.f32 q5, q12, q2 - vld1.32 {q8, q9}, [r0, :128]! - vmul.f32 q1, q14, q2 - vld1.32 {q10, q11}, [r0, :128] - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vsub.f32 q2, q10, q13 - vsub.f32 q4, q8, q15 - vadd.f32 q3, q11, q12 - - vst1.32 {q0, q1}, [r2, :128]! 
- - vsub.f32 q5, q9, q14 - vadd.f32 q6, q10, q13 - vsub.f32 q7, q11, q12 - - vst1.32 {q2, q3}, [r2, :128]! - vst1.32 {q4, q5}, [r2, :128]! - vst1.32 {q6, q7}, [r2, :128] - - vpop {q4-q7} - bx lr - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x8_i -_neon_static_x8_i: -#else - .globl neon_static_x8_i -neon_static_x8_i: -#endif - push {r4-r8, lr} - vpush {q4-q7} - - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 - -neon_x8_loop: - vld1.32 {q2, q3}, [r2, :128]! - subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 - vadd.f32 q6, q12, q15 - vadd.f32 q5, q13, q14 - vsub.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 - vadd.f32 q3, q9, q12 - vst1.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12, :128] - vsub.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst1.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst1.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6, q7}, [r8, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 - vadd.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst1.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 - vst1.32 {q2, q3}, [r5, :128]! - vsub.f32 q7, q11, q14 - vst1.32 {q4, q5}, [r7, :128]! - vst1.32 {q6, q7}, [r12, :128]! - bne neon_x8_loop - - vpop {q4-q7} - pop {r4-r8, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x8_t_i -_neon_static_x8_t_i: -#else - .globl neon_static_x8_t_i -neon_static_x8_t_i: -#endif - push {r4-r8, lr} - vpush {q4-q7} - - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 - -neon_x8_t_loop: - vld1.32 {q2, q3}, [r2, :128]! 
- subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 - vadd.f32 q6, q12, q15 - vadd.f32 q5, q13, q14 - vsub.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 - vadd.f32 q3, q9, q12 - vst2.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12,:128] - vsub.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst2.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst2.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6, q7}, [r8, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 - vadd.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst2.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 - vst2.32 {q2, q3}, [r5, :128]! - vsub.f32 q7, q11, q14 - vst2.32 {q4, q5}, [r7, :128]! - vst2.32 {q6, q7}, [r12,:128]! - bne neon_x8_t_loop - - vpop {q4-q7} - pop {r4-r8, pc} -- cgit v1.1 From 7dea85078a8abdf837b39795ca6b55e90c82b4f4 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 17 Mar 2016 13:02:58 +0200 Subject: Use local labels to fix 'symbol already defined' errors --- src/neon_static.s | 88 +++++++++++++++++++++++++++---------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/src/neon_static.s b/src/neon_static.s index e183a14..e752c70 100644 --- a/src/neon_static.s +++ b/src/neon_static.s @@ -60,7 +60,7 @@ neon_static_e_f: add r9, r10, lr, lsl #2 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop: +1: vld2.32 {q15}, [r10, :128]! vld2.32 {q13}, [r8, :128]! vld2.32 {q14}, [r7, :128]! @@ -124,7 +124,7 @@ _neon_ee_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop + bne 1b ldr r11, [r0, #12] vld2.32 {q9}, [r5, :128]! @@ -195,9 +195,9 @@ _neon_ee_loop: vswp d1, d2 vswp d5, d6 vstmia r3!, {q0-q3} - beq _neon_ee_loop2_exit + beq 4f -_neon_oo_loop: +2: vld2.32 {q8}, [r6, :128]! vld2.32 {q9}, [r5, :128]! vld2.32 {q10}, [r4, :128]! 
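The relabelling in this commit works because GNU as treats purely numeric labels as local: a label such as `1:` only has to be unique up to its next definition, `bne 1b` binds to the nearest preceding `1:`, and `beq 4f` to the nearest following `4:`. The merged neon_static.s defines the same loop skeleton once per kernel, so globally named labels like _neon_ee_loop appear twice (once under neon_static_e_f, once under neon_static_e_i) and trip the 'symbol already defined' error. A minimal sketch of the mechanism, with hypothetical routine names rather than anything from the FFTS sources:

	.text
	.globl routine_a           @ hypothetical names, for illustration only
routine_a:
	mov r2, #4
1:	subs r2, r2, #1
	bne 1b                     @ binds to the nearest preceding "1:"
	bx lr

	.globl routine_b
routine_b:
	mov r2, #8
1:	subs r2, r2, #1            @ a second "1:" is fine; a second
	bne 1b                     @ "_neon_ee_loop:" would not assemble
	bx lr

Both loops assemble in one file, which is the situation neon_static.s is in once the per-direction sources (such as the deleted neon_static_i.s above) are folded into it.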
@@ -239,12 +239,12 @@ _neon_oo_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_oo_loop + bne 2b ldr r3, [r0, #16] @ p->ee_ws ldr r11, [r0, #32] @ p->i1 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop2: +3: vld2.32 {q15}, [r5, :128]! vld2.32 {q13}, [r4, :128]! vld2.32 {q14}, [r1, :128]! @@ -308,9 +308,9 @@ _neon_ee_loop2: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop2 + bne 3b -_neon_ee_loop2_exit: +4: vpop {q4-q7} pop {r4-r12, pc} @@ -341,7 +341,7 @@ neon_static_e_i: add r9, r10, lr, lsl #2 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop: +1: vld2.32 {q15}, [r10, :128]! vld2.32 {q13}, [r8, :128]! vld2.32 {q14}, [r7, :128]! @@ -405,7 +405,7 @@ _neon_ee_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop + bne 1b ldr r11, [r0, #12] vld2.32 {q9}, [r5, :128]! @@ -476,9 +476,9 @@ _neon_ee_loop: vswp d1, d2 vswp d5, d6 vstmia r3!, {q0-q3} - beq _neon_ee_loop2_exit + beq 4f -_neon_oo_loop: +2: vld2.32 {q8}, [r6, :128]! vld2.32 {q9}, [r5, :128]! vld2.32 {q10}, [r4, :128]! @@ -520,12 +520,12 @@ _neon_oo_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_oo_loop + bne 2b ldr r3, [r0, #16] @ p->ee_ws ldr r11, [r0, #32] @ p->i1 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_loop2: +3: vld2.32 {q15}, [r5, :128]! vld2.32 {q13}, [r4, :128]! vld2.32 {q14}, [r1, :128]! @@ -589,9 +589,9 @@ _neon_ee_loop2: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_loop2 + bne 3b -_neon_ee_loop2_exit: +4: vpop {q4-q7} pop {r4-r12, pc} @@ -622,7 +622,7 @@ neon_static_o_f: add r9, r10, lr, lsl #2 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop: +1: vld2.32 {q15}, [r10, :128]! vld2.32 {q13}, [r8, :128]! vld2.32 {q14}, [r7, :128]! @@ -686,12 +686,12 @@ _neon_ee_o_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop + bne 1b ldr r11, [r0, #32] @ p->i1 cmp r11, #0 - beq _neon_oo_o_loop_exit -_neon_oo_o_loop: + beq 3f +2: vld2.32 {q8}, [r6, :128]! vld2.32 {q9}, [r5, :128]! vld2.32 {q10}, [r4, :128]! @@ -733,9 +733,9 @@ _neon_oo_o_loop: vtrn.32 q5, q7 vst2.32 {q4,q5}, [r3, :128]! vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_o_loop + bne 2b -_neon_oo_o_loop_exit: +3: ldr r11, [r0, #8] vld1.32 {q8}, [r5, :128]! vld1.32 {q10}, [r6, :128]! @@ -811,10 +811,10 @@ _neon_oo_o_loop_exit: vswp d9, d10 vswp d13, d14 vstmia lr!, {q4-q7} - beq _neon_ee_o_loop2_exit + beq 5f vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop2: +4: vld2.32 {q15}, [r5, :128]! vld2.32 {q13}, [r4, :128]! vld2.32 {q14}, [r1, :128]! @@ -878,9 +878,9 @@ _neon_ee_o_loop2: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop2 + bne 4b -_neon_ee_o_loop2_exit: +5: vpop {q4-q7} pop {r4-r12, pc} @@ -911,7 +911,7 @@ neon_static_o_i: add r9, r10, lr, lsl #2 vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop: +1: vld2.32 {q15}, [r10, :128]! vld2.32 {q13}, [r8, :128]! vld2.32 {q14}, [r7, :128]! @@ -975,12 +975,12 @@ _neon_ee_o_loop: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop + bne 1b ldr r11, [r0, #32] @ p->i1 cmp r11, #0 - beq _neon_oo_o_loop_exit -_neon_oo_o_loop: + beq 3f +2: vld2.32 {q8}, [r6, :128]! vld2.32 {q9}, [r5, :128]! vld2.32 {q10}, [r4, :128]! @@ -1022,9 +1022,9 @@ _neon_oo_o_loop: vtrn.32 q5, q7 vst2.32 {q4,q5}, [r3, :128]! vst2.32 {q6,q7}, [lr, :128]! 
- bne _neon_oo_o_loop + bne 2b -_neon_oo_o_loop_exit: +3: ldr r11, [r0, #8] vld1.32 {q8}, [r5, :128]! vld1.32 {q10}, [r6, :128]! @@ -1100,10 +1100,10 @@ _neon_oo_o_loop_exit: vswp d9, d10 vswp d13, d14 vstmia lr!, {q4-q7} - beq _neon_ee_o_loop2_exit + beq 5f vld1.32 {d16, d17}, [r3, :128] -_neon_ee_o_loop2: +4: vld2.32 {q15}, [r5, :128]! vld2.32 {q13}, [r4, :128]! vld2.32 {q14}, [r1, :128]! @@ -1167,9 +1167,9 @@ _neon_ee_o_loop2: vtrn.32 q5, q7 vst2.32 {q4, q5}, [r3, :128]! vst2.32 {q6, q7}, [lr, :128]! - bne _neon_ee_o_loop2 + bne 4b -_neon_ee_o_loop2_exit: +5: vpop {q4-q7} pop {r4-r12, pc} @@ -1296,7 +1296,7 @@ neon_static_x8_f: add r7, r6, r1 @ data5 add r12, r8, r1 @ data7 -neon_x8_loop: +1: vld1.32 {q2, q3}, [r2, :128]! subs r1, r1, #32 vld1.32 {q14, q15}, [r5, :128] @@ -1393,7 +1393,7 @@ neon_x8_loop: vadd.f32 q7, q11, q14 vst1.32 {q4, q5}, [r7, :128]! vst1.32 {q6, q7}, [r12, :128]! - bne neon_x8_loop + bne 1b vpop {q4-q7} pop {r4-r8, pc} @@ -1417,7 +1417,7 @@ neon_static_x8_i: add r7, r6, r1 @ data5 add r12, r8, r1 @ data7 -neon_x8_loop: +1: vld1.32 {q2, q3}, [r2, :128]! subs r1, r1, #32 vld1.32 {q14, q15}, [r5, :128] @@ -1514,7 +1514,7 @@ neon_x8_loop: vsub.f32 q7, q11, q14 vst1.32 {q4, q5}, [r7, :128]! vst1.32 {q6, q7}, [r12, :128]! - bne neon_x8_loop + bne 1b vpop {q4-q7} pop {r4-r8, pc} @@ -1538,7 +1538,7 @@ neon_static_x8_t_f: add r7, r6, r1 @ data5 add r12, r8, r1 @ data7 -neon_x8_t_loop: +1: vld1.32 {q2, q3}, [r2, :128]! subs r1, r1, #32 vld1.32 {q14, q15}, [r5, :128] @@ -1635,7 +1635,7 @@ neon_x8_t_loop: vadd.f32 q7, q11, q14 vst2.32 {q4, q5}, [r7, :128]! vst2.32 {q6, q7}, [r12, :128]! - bne neon_x8_t_loop + bne 1b vpop {q4-q7} pop {r4-r8, pc} @@ -1659,7 +1659,7 @@ neon_static_x8_t_i: add r7, r6, r1 @ data5 add r12, r8, r1 @ data7 -neon_x8_t_loop: +1: vld1.32 {q2, q3}, [r2, :128]! subs r1, r1, #32 vld1.32 {q14, q15}, [r5, :128] @@ -1756,7 +1756,7 @@ neon_x8_t_loop: vsub.f32 q7, q11, q14 vst2.32 {q4, q5}, [r7, :128]! vst2.32 {q6, q7}, [r12,:128]! - bne neon_x8_t_loop + bne 1b vpop {q4-q7} pop {r4-r8, pc} -- cgit v1.1 From a62bc084cad3ea82128c8060b17a488f2fce2587 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 17 Mar 2016 19:43:33 +0200 Subject: Simplify maintenance by using macros --- src/neon_static.s | 1269 ++++++++++++++++------------------------------------- 1 file changed, 381 insertions(+), 888 deletions(-) diff --git a/src/neon_static.s b/src/neon_static.s index e752c70..9121c4b 100644 --- a/src/neon_static.s +++ b/src/neon_static.s @@ -33,7 +33,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ .fpu neon +.macro neon_static_e, forward=1 .align 4 + +.if \forward #ifdef __APPLE__ .globl _neon_static_e_f _neon_static_e_f: @@ -41,6 +44,15 @@ _neon_static_e_f: .globl neon_static_e_f neon_static_e_f: #endif +.else +#ifdef __APPLE__ + .globl _neon_static_e_i +_neon_static_e_i: +#else + .globl neon_static_e_i +neon_static_e_i: +#endif +.endif push {r4-r12, lr} vpush {q4-q7} @@ -93,22 +105,42 @@ neon_static_e_f: vsub.f32 q7, q4, q0 vsub.f32 q9, q12, q11 vsub.f32 q13, q5, q3 +.if \forward vsub.f32 d29, d5, d2 +.else + vadd.f32 d29, d5, d2 +.endif vadd.f32 q5, q5, q3 vadd.f32 q10, q4, q0 vadd.f32 q11, q12, q11 +.if \forward vadd.f32 d31, d5, d2 vadd.f32 d28, d4, d3 vsub.f32 d30, d4, d3 vsub.f32 d5, d19, d14 vsub.f32 d7, d31, d26 +.else + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 +.endif vadd.f32 q1, q14, q5 vadd.f32 q0, q11, q10 +.if \forward vadd.f32 d6, d30, d27 vadd.f32 d4, d18, d15 vadd.f32 d13, d19, d14 vsub.f32 d12, d18, d15 vadd.f32 d15, d31, d26 +.else + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 +.endif ldr r3, [r12], #4 vtrn.32 q1, q3 ldr lr, [r12], #4 @@ -117,7 +149,11 @@ neon_static_e_f: vsub.f32 q4, q11, q10 add lr, r2, lr, lsl #2 vsub.f32 q5, q14, q5 +.if \forward vsub.f32 d14, d30, d27 +.else + vadd.f32 d14, d30, d27 +.endif vst2.32 {q0, q1}, [r3, :128]! vst2.32 {q2, q3}, [lr, :128]! vtrn.32 q4, q6 @@ -137,11 +173,21 @@ neon_static_e_f: vsub.f32 q10, q9, q8 vadd.f32 q8, q9, q8 vadd.f32 q9, q12, q8 +.if \forward vsub.f32 d9, d23, d20 vadd.f32 d11, d23, d20 +.else + vadd.f32 d9, d23, d20 + vsub.f32 d11, d23, d20 +.endif vsub.f32 q8, q12, q8 +.if \forward vadd.f32 d8, d22, d21 vsub.f32 d10, d22, d21 +.else + vsub.f32 d8, d22, d21 + vadd.f32 d10, d22, d21 +.endif ldr r3, [r12], #4 vld1.32 {d20, d21}, [r11, :128] ldr lr, [r12], #4 @@ -158,11 +204,21 @@ neon_static_e_f: vsub.f32 q12, q0, q11 vadd.f32 q11, q0, q11 vadd.f32 q13, q15, q13 +.if \forward vsub.f32 d13, d29, d24 +.else + vadd.f32 d13, d29, d24 +.endif vadd.f32 q15, q13, q11 +.if \forward vadd.f32 d12, d28, d25 vadd.f32 d15, d29, d24 vsub.f32 d14, d28, d25 +.else + vsub.f32 d12, d28, d25 + vsub.f32 d15, d29, d24 + vadd.f32 d14, d28, d25 +.endif vtrn.32 q15, q6 vsub.f32 q15, q13, q11 vtrn.32 q15, q7 @@ -187,10 +243,17 @@ neon_static_e_f: vsub.f32 q10, q12, q10 vadd.f32 q0, q9, q11 vsub.f32 q2, q9, q11 +.if \forward vsub.f32 d3, d17, d20 vadd.f32 d7, d17, d20 vadd.f32 d2, d16, d21 vsub.f32 d6, d16, d21 +.else + vadd.f32 d3, d17, d20 + vsub.f32 d7, d17, d20 + vsub.f32 d2, d16, d21 + vadd.f32 d6, d16, d21 +.endif cmp r11, #0 vswp d1, d2 vswp d5, d6 @@ -210,10 +273,17 @@ neon_static_e_f: vld2.32 {q10}, [r9, :128]! vld2.32 {q13}, [r7, :128]! vsub.f32 q2, q12, q11 +.if \forward vadd.f32 d7, d19, d16 vsub.f32 d3, d19, d16 vsub.f32 d6, d18, d17 vadd.f32 d2, d18, d17 +.else + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 +.endif vld2.32 {q9}, [r10, :128]! vld2.32 {q8}, [r8, :128]! 
vadd.f32 q0, q12, q11 @@ -225,11 +295,21 @@ neon_static_e_f: vadd.f32 q4, q12, q11 vtrn.32 q0, q2 ldr r3, [r12], #4 +.if \forward vadd.f32 d15, d19, d16 +.else + vsub.f32 d15, d19, d16 +.endif ldr lr, [r12], #4 +.if \forward vsub.f32 d11, d19, d16 vsub.f32 d14, d18, d17 vadd.f32 d10, d18, d17 +.else + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 +.endif add r3, r2, r3, lsl #2 vtrn.32 q1, q3 add lr, r2, lr, lsl #2 @@ -277,22 +357,42 @@ neon_static_e_f: vsub.f32 q7, q4, q0 vsub.f32 q9, q12, q11 vsub.f32 q13, q5, q3 +.if \forward vsub.f32 d29, d5, d2 +.else + vadd.f32 d29, d5, d2 +.endif vadd.f32 q5, q5, q3 vadd.f32 q10, q4, q0 vadd.f32 q11, q12, q11 +.if \forward vadd.f32 d31, d5, d2 vadd.f32 d28, d4, d3 vsub.f32 d30, d4, d3 vsub.f32 d5, d19, d14 vsub.f32 d7, d31, d26 +.else + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 +.endif vadd.f32 q1, q14, q5 vadd.f32 q0, q11, q10 +.if \forward vadd.f32 d6, d30, d27 vadd.f32 d4, d18, d15 vadd.f32 d13, d19, d14 vsub.f32 d12, d18, d15 vadd.f32 d15, d31, d26 +.else + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 +.endif ldr r3, [r12], #4 vtrn.32 q1, q3 ldr lr, [r12], #4 @@ -301,7 +401,11 @@ neon_static_e_f: vsub.f32 q4, q11, q10 add lr, r2, lr, lsl #2 vsub.f32 q5, q14, q5 +.if \forward vsub.f32 d14, d30, d27 +.else + vadd.f32 d14, d30, d27 +.endif vst2.32 {q0, q1}, [r3, :128]! vst2.32 {q2, q3}, [lr, :128]! vtrn.32 q4, q6 @@ -313,15 +417,28 @@ neon_static_e_f: 4: vpop {q4-q7} pop {r4-r12, pc} +.endm +.macro neon_static_o, forward=1 .align 4 + +.if \forward #ifdef __APPLE__ - .globl _neon_static_e_i -_neon_static_e_i: + .globl _neon_static_o_f +_neon_static_o_f: #else - .globl neon_static_e_i -neon_static_e_i: + .globl neon_static_o_f +neon_static_o_f: +#endif +.else +#ifdef __APPLE__ + .globl _neon_static_o_i +_neon_static_o_i: +#else + .globl neon_static_o_i +neon_static_o_i: #endif +.endif push {r4-r12, lr} vpush {q4-q7} @@ -374,22 +491,42 @@ neon_static_e_i: vsub.f32 q7, q4, q0 vsub.f32 q9, q12, q11 vsub.f32 q13, q5, q3 +.if \forward + vsub.f32 d29, d5, d2 +.else vadd.f32 d29, d5, d2 +.endif vadd.f32 q5, q5, q3 vadd.f32 q10, q4, q0 vadd.f32 q11, q12, q11 +.if \forward + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 +.else vsub.f32 d31, d5, d2 vsub.f32 d28, d4, d3 vadd.f32 d30, d4, d3 vadd.f32 d5, d19, d14 vadd.f32 d7, d31, d26 +.endif vadd.f32 q1, q14, q5 vadd.f32 q0, q11, q10 +.if \forward + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 +.else vsub.f32 d6, d30, d27 vsub.f32 d4, d18, d15 vsub.f32 d13, d19, d14 vadd.f32 d12, d18, d15 vsub.f32 d15, d31, d26 +.endif ldr r3, [r12], #4 vtrn.32 q1, q3 ldr lr, [r12], #4 @@ -398,7 +535,11 @@ neon_static_e_i: vsub.f32 q4, q11, q10 add lr, r2, lr, lsl #2 vsub.f32 q5, q14, q5 +.if \forward + vsub.f32 d14, d30, d27 +.else vadd.f32 d14, d30, d27 +.endif vst2.32 {q0, q1}, [r3, :128]! vst2.32 {q2, q3}, [lr, :128]! vtrn.32 q4, q6 @@ -407,80 +548,12 @@ neon_static_e_i: vst2.32 {q6, q7}, [lr, :128]! bne 1b - ldr r11, [r0, #12] - vld2.32 {q9}, [r5, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q12}, [r4, :128]! - vld2.32 {q0}, [r7, :128]! - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! 
- vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vadd.f32 d9, d23, d20 - vsub.f32 d11, d23, d20 - vsub.f32 q8, q12, q8 - vsub.f32 d8, d22, d21 - vadd.f32 d10, d22, d21 - ldr r3, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r3, r2, r3, lsl #2 - vtrn.32 q8, q5 - add lr, r2, lr, lsl #2 - vswp d9, d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! - vld2.32 {q15}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vadd.f32 d13, d29, d24 - vadd.f32 q15, q13, q11 - vsub.f32 d12, d28, d25 - vsub.f32 d15, d29, d24 - vadd.f32 d14, d28, d25 - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! - vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 ldr r11, [r0, #32] @ p->i1 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vadd.f32 d3, d17, d20 - vsub.f32 d7, d17, d20 - vsub.f32 d2, d16, d21 - vadd.f32 d6, d16, d21 cmp r11, #0 - vswp d1, d2 - vswp d5, d6 - vstmia r3!, {q0-q3} - beq 4f - + beq 3f 2: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! vld2.32 {q10}, [r4, :128]! vld2.32 {q13}, [r1, :128]! vadd.f32 q11, q9, q8 @@ -490,11 +563,18 @@ neon_static_e_i: subs r11, r11, #1 vld2.32 {q10}, [r9, :128]! vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 - vadd.f32 d3, d19, d16 - vadd.f32 d6, d18, d17 - vsub.f32 d2, d18, d17 + vsub.f32 q2, q12, q11 +.if \forward + vadd.f32 d7, d19, d16 + vsub.f32 d3, d19, d16 + vsub.f32 d6, d18, d17 + vadd.f32 d2, d18, d17 +.else + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 +.endif vld2.32 {q9}, [r10, :128]! vld2.32 {q8}, [r8, :128]! vadd.f32 q0, q12, q11 @@ -504,552 +584,71 @@ neon_static_e_i: vsub.f32 q9, q10, q9 vsub.f32 q6, q12, q11 vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 ldr r3, [r12], #4 +.if \forward + vadd.f32 d15, d19, d16 +.else vsub.f32 d15, d19, d16 +.endif ldr lr, [r12], #4 +.if \forward + vsub.f32 d11, d19, d16 + vsub.f32 d14, d18, d17 + vadd.f32 d10, d18, d17 +.else vadd.f32 d11, d19, d16 vadd.f32 d14, d18, d17 vsub.f32 d10, d18, d17 +.endif add r3, r2, r3, lsl #2 vtrn.32 q1, q3 add lr, r2, lr, lsl #2 vst2.32 {q0,q1}, [r3, :128]! vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4,q5}, [r3, :128]! + vst2.32 {q6,q7}, [lr, :128]! bne 2b - ldr r3, [r0, #16] @ p->ee_ws - ldr r11, [r0, #32] @ p->i1 - vld1.32 {d16, d17}, [r3, :128] 3: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne 3b - -4: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_o_f -_neon_static_o_f: -#else - .globl neon_static_o_f -neon_static_o_f: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -1: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne 1b - - ldr r11, [r0, #32] @ p->i1 - cmp r11, #0 - beq 3f -2: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! 
- vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vadd.f32 d7, d19, d16 - vsub.f32 d3, d19, d16 - vsub.f32 d6, d18, d17 - vadd.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vadd.f32 d15, d19, d16 - ldr lr, [r12], #4 - vsub.f32 d11, d19, d16 - vsub.f32 d14, d18, d17 - vadd.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r3, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne 2b - -3: - ldr r11, [r0, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q15}, [r8, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r3, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r3, r2, r3, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r2, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vsub.f32 d25, d19, d16 - vadd.f32 d27, d19, d16 - vsub.f32 q1, q11, q10 - vadd.f32 d24, d18, d17 - vsub.f32 d26, d18, d17 - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r3, :128]! - vld2.32 {q0}, [r7, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r10, :128]! - vld2.32 {q14}, [r9, :128]! - vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vsub.f32 d29, d1, d30 - vadd.f32 d27, d1, d30 - vsub.f32 q3, q3, q1 - vadd.f32 d28, d0, d31 - vsub.f32 d26, d0, d31 - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r3, :128]! - vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - ldr r3, [r0, #16] @ p->ee_ws - vsub.f32 d21, d30, d19 - ldr r11, [r0, #32] @ p->i1 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vsub.f32 d11, d27, d16 - vadd.f32 d15, d27, d16 - vadd.f32 d10, d26, d17 - vsub.f32 d14, d26, d17 - cmp r11, #0 - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - beq 5f - - vld1.32 {d16, d17}, [r3, :128] -4: - vld2.32 {q15}, [r5, :128]! - vld2.32 {q13}, [r4, :128]! - vld2.32 {q14}, [r1, :128]! - vld2.32 {q9}, [r10, :128]! - vld2.32 {q10}, [r9, :128]! - vld2.32 {q11}, [r8, :128]! - vld2.32 {q12}, [r7, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r6, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vsub.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vadd.f32 d31, d5, d2 - vadd.f32 d28, d4, d3 - vsub.f32 d30, d4, d3 - vsub.f32 d5, d19, d14 - vsub.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vadd.f32 d6, d30, d27 - vadd.f32 d4, d18, d15 - vadd.f32 d13, d19, d14 - vsub.f32 d12, d18, d15 - vadd.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vsub.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne 4b - -5: - vpop {q4-q7} - pop {r4-r12, pc} - - .align 4 -#ifdef __APPLE__ - .globl _neon_static_o_i -_neon_static_o_i: -#else - .globl neon_static_o_i -neon_static_o_i: -#endif - push {r4-r12, lr} - vpush {q4-q7} - - ldr lr, [r0, #40] @ p->N - ldr r12, [r0 ] @ p->offsets - ldr r3, [r0, #16] @ p->ee_ws - - add r7, r1, lr - add r5, r1, lr, lsl #1 - add r4, r1, lr, lsl #2 - add r10, r7, lr, lsl #1 - add r8, r7, lr, lsl #2 - - ldr r11, [r0, #28] @ p->i0 - - add r6, r4, lr, lsl #1 - add r9, r10, lr, lsl #2 - - vld1.32 {d16, d17}, [r3, :128] -1: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r1, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! - subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 - vsub.f32 d28, d4, d3 - vadd.f32 d30, d4, d3 - vadd.f32 d5, d19, d14 - vadd.f32 d7, d31, d26 - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 - vsub.f32 d4, d18, d15 - vsub.f32 d13, d19, d14 - vadd.f32 d12, d18, d15 - vsub.f32 d15, d31, d26 - ldr r3, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r3, r2, r3, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r2, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 - vst2.32 {q0, q1}, [r3, :128]! - vst2.32 {q2, q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4, q5}, [r3, :128]! - vst2.32 {q6, q7}, [lr, :128]! - bne 1b - - ldr r11, [r0, #32] @ p->i1 - cmp r11, #0 - beq 3f -2: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! 
- vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r9, :128]! - vld2.32 {q13}, [r7, :128]! - vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 - vadd.f32 d3, d19, d16 - vadd.f32 d6, d18, d17 - vsub.f32 d2, d18, d17 - vld2.32 {q9}, [r10, :128]! - vld2.32 {q8}, [r8, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r3, [r12], #4 - vsub.f32 d15, d19, d16 - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 - vadd.f32 d14, d18, d17 - vsub.f32 d10, d18, d17 - add r3, r2, r3, lsl #2 - vtrn.32 q1, q3 - add lr, r2, lr, lsl #2 - vst2.32 {q0,q1}, [r3, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r3, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne 2b - -3: - ldr r11, [r0, #8] - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r1, :128]! - vld2.32 {q15}, [r8, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r3, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r3, r2, r3, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r2, lr, lsl #2 - vadd.f32 q10, q10, q12 + ldr r11, [r0, #8] + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r1, :128]! + vld2.32 {q15}, [r8, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r3, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r3, r2, r3, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r2, lr, lsl #2 + vadd.f32 q10, q10, q12 vadd.f32 q0, q11, q10 +.if \forward + vsub.f32 d25, d19, d16 + vadd.f32 d27, d19, d16 +.else vadd.f32 d25, d19, d16 vsub.f32 d27, d19, d16 +.endif vsub.f32 q1, q11, q10 +.if \forward + vadd.f32 d24, d18, d17 + vsub.f32 d26, d18, d17 +.else vsub.f32 d24, d18, d17 vadd.f32 d26, d18, d17 +.endif vtrn.32 q0, q12 vtrn.32 q1, q13 vld1.32 {d24, d25}, [r11, :128] @@ -1063,11 +662,21 @@ neon_static_o_i: vsub.f32 q0, q14, q13 vadd.f32 q3, q14, q13 vadd.f32 q2, q3, q1 +.if \forward + vsub.f32 d29, d1, d30 + vadd.f32 d27, d1, d30 +.else vadd.f32 d29, d1, d30 vsub.f32 d27, d1, d30 +.endif vsub.f32 q3, q3, q1 +.if \forward + vadd.f32 d28, d0, d31 + vsub.f32 d26, d0, d31 +.else vsub.f32 d28, d0, d31 vadd.f32 d26, d0, d31 +.endif vtrn.32 q2, q14 vtrn.32 q3, q13 vswp d5, d6 @@ -1092,10 +701,17 @@ neon_static_o_i: vsub.f32 q8, q8, q10 vadd.f32 q4, q14, q9 vsub.f32 q6, q14, q9 +.if \forward + vsub.f32 d11, d27, d16 + vadd.f32 d15, d27, d16 + vadd.f32 d10, d26, d17 + vsub.f32 d14, d26, d17 +.else vadd.f32 d11, d27, d16 vsub.f32 d15, d27, d16 vsub.f32 d10, d26, d17 vadd.f32 d14, d26, d17 +.endif cmp r11, #0 vswp d9, d10 vswp d13, d14 @@ -1136,22 +752,42 @@ neon_static_o_i: vsub.f32 q7, q4, q0 vsub.f32 q9, q12, q11 vsub.f32 q13, q5, q3 +.if \forward + vsub.f32 d29, d5, d2 +.else vadd.f32 d29, d5, d2 +.endif vadd.f32 q5, q5, q3 vadd.f32 q10, q4, q0 vadd.f32 q11, q12, q11 +.if \forward + vadd.f32 d31, d5, d2 + vadd.f32 d28, d4, d3 + vsub.f32 d30, d4, d3 + vsub.f32 d5, d19, d14 + vsub.f32 d7, d31, d26 +.else vsub.f32 d31, d5, d2 vsub.f32 d28, d4, d3 vadd.f32 d30, d4, d3 vadd.f32 d5, d19, d14 vadd.f32 d7, d31, d26 +.endif vadd.f32 q1, q14, q5 vadd.f32 q0, q11, q10 +.if \forward + vadd.f32 d6, d30, d27 + vadd.f32 d4, d18, d15 + 
vadd.f32 d13, d19, d14 + vsub.f32 d12, d18, d15 + vadd.f32 d15, d31, d26 +.else vsub.f32 d6, d30, d27 vsub.f32 d4, d18, d15 vsub.f32 d13, d19, d14 vadd.f32 d12, d18, d15 vsub.f32 d15, d31, d26 +.endif ldr r3, [r12], #4 vtrn.32 q1, q3 ldr lr, [r12], #4 @@ -1160,7 +796,11 @@ neon_static_o_i: vsub.f32 q4, q11, q10 add lr, r2, lr, lsl #2 vsub.f32 q5, q14, q5 +.if \forward + vsub.f32 d14, d30, d27 +.else vadd.f32 d14, d30, d27 +.endif vst2.32 {q0, q1}, [r3, :128]! vst2.32 {q2, q3}, [lr, :128]! vtrn.32 q4, q6 @@ -1172,60 +812,20 @@ neon_static_o_i: 5: vpop {q4-q7} pop {r4-r12, pc} +.endm + +.macro neon_static_x4, forward=1 + .align 4 - .align 4 +.if \forward #ifdef __APPLE__ .globl _neon_static_x4_f _neon_static_x4_f: #else .globl neon_static_x4_f neon_static_x4_f: -#endif - add r3, r0, #64 - vpush {q4-q7} - - vld1.32 {q2, q3}, [r1, :128] - vld1.32 {q12, q13}, [r3, :128]! - mov r2, r0 - vmul.f32 q0, q13, q3 - vld1.32 {q14, q15}, [r3, :128] - vmul.f32 q5, q12, q2 - vld1.32 {q8, q9}, [r0, :128]! - vmul.f32 q1, q14, q2 - vld1.32 {q10, q11}, [r0, :128] - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vadd.f32 q2, q10, q13 - vsub.f32 q4, q8, q15 - vsub.f32 q3, q11, q12 - - vst1.32 {q0, q1}, [r2, :128]! - - vsub.f32 q5, q9, q14 - vsub.f32 q6, q10, q13 - vadd.f32 q7, q11, q12 - - vst1.32 {q2, q3}, [r2, :128]! - vst1.32 {q4, q5}, [r2, :128]! - vst1.32 {q6, q7}, [r2, :128] - - vpop {q4-q7} - bx lr - - .align 4 +#endif +.else #ifdef __APPLE__ .globl _neon_static_x4_i _neon_static_x4_i: @@ -1233,6 +833,7 @@ _neon_static_x4_i: .globl neon_static_x4_i neon_static_x4_i: #endif +.endif add r3, r0, #64 vpush {q4-q7} @@ -1260,24 +861,38 @@ neon_static_x4_i: vsub.f32 q13, q13, q1 vadd.f32 q0, q8, q15 vadd.f32 q1, q9, q14 +.if \forward + vadd.f32 q2, q10, q13 +.else vsub.f32 q2, q10, q13 +.endif vsub.f32 q4, q8, q15 +.if \forward + vsub.f32 q3, q11, q12 +.else vadd.f32 q3, q11, q12 - +.endif vst1.32 {q0, q1}, [r2, :128]! - vsub.f32 q5, q9, q14 +.if \forward + vsub.f32 q6, q10, q13 + vadd.f32 q7, q11, q12 +.else vadd.f32 q6, q10, q13 vsub.f32 q7, q11, q12 - +.endif vst1.32 {q2, q3}, [r2, :128]! vst1.32 {q4, q5}, [r2, :128]! vst1.32 {q6, q7}, [r2, :128] vpop {q4-q7} bx lr +.endm - .align 4 +.macro neon_static_x8, forward=1 + .align 4 + +.if \forward #ifdef __APPLE__ .globl _neon_static_x8_f _neon_static_x8_f: @@ -1285,120 +900,7 @@ _neon_static_x8_f: .globl neon_static_x8_f neon_static_x8_f: #endif - push {r4-r8, lr} - vpush {q4-q7} - - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 - -1: - vld1.32 {q2, q3}, [r2, :128]! - subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! 
- vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vadd.f32 q4, q12, q15 - vsub.f32 q6, q12, q15 - vsub.f32 q5, q13, q14 - vadd.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vadd.f32 q2, q8, q10 - vsub.f32 q3, q9, q12 - vst1.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12, :128] - vadd.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst1.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vsub.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst1.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6, q7}, [r8, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vadd.f32 q2, q10, q15 - vsub.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst1.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vsub.f32 q6, q10, q15 - vst1.32 {q2, q3}, [r5, :128]! - vadd.f32 q7, q11, q14 - vst1.32 {q4, q5}, [r7, :128]! - vst1.32 {q6, q7}, [r12, :128]! - bne 1b - - vpop {q4-q7} - pop {r4-r8, pc} - - .align 4 +.else #ifdef __APPLE__ .globl _neon_static_x8_i _neon_static_x8_i: @@ -1406,6 +908,7 @@ _neon_static_x8_i: .globl neon_static_x8_i neon_static_x8_i: #endif +.endif push {r4-r8, lr} vpush {q4-q7} @@ -1438,10 +941,17 @@ neon_static_x8_i: vld1.32 {q12, q13}, [r3, :128] vsub.f32 q15, q11, q10 vsub.f32 q14, q9, q8 +.if \forward + vadd.f32 q4, q12, q15 + vsub.f32 q6, q12, q15 + vsub.f32 q5, q13, q14 + vadd.f32 q7, q13, q14 +.else vsub.f32 q4, q12, q15 vadd.f32 q6, q12, q15 vadd.f32 q5, q13, q14 vsub.f32 q7, q13, q14 +.endif vld1.32 {q14, q15}, [r8, :128] vld1.32 {q12, q13}, [r6, :128] vmul.f32 q1, q14, q2 @@ -1472,16 +982,29 @@ neon_static_x8_i: vadd.f32 q0, q11, q2 vadd.f32 q1, q13, q15 vsub.f32 q4, q11, q2 +.if \forward + vadd.f32 q2, q8, q10 + vsub.f32 q3, q9, q12 +.else vsub.f32 q2, q8, q10 vadd.f32 q3, q9, q12 +.endif vst1.32 {q0, q1}, [r0, :128]! vsub.f32 q5, q13, q15 vld1.32 {q14, q15}, [r12, :128] +.if \forward + vadd.f32 q7, q9, q12 +.else vsub.f32 q7, q9, q12 +.endif vld1.32 {q12, q13}, [r7, :128] vst1.32 {q2, q3}, [r4, :128]! vld1.32 {q2, q3}, [r2, :128]! +.if \forward + vsub.f32 q6, q8, q10 +.else vadd.f32 q6, q8, q10 +.endif vmul.f32 q8, q14, q2 vst1.32 {q4, q5}, [r6, :128]! 
vmul.f32 q10, q15, q3 @@ -1504,22 +1027,39 @@ neon_static_x8_i: vld1.32 {q10, q11}, [r5, :128] vadd.f32 q0, q8, q13 vadd.f32 q1, q9, q12 +.if \forward + vadd.f32 q2, q10, q15 + vsub.f32 q3, q11, q14 +.else vsub.f32 q2, q10, q15 vadd.f32 q3, q11, q14 +.endif vsub.f32 q4, q8, q13 vst1.32 {q0, q1}, [r3, :128]! vsub.f32 q5, q9, q12 +.if \forward + vsub.f32 q6, q10, q15 +.else vadd.f32 q6, q10, q15 +.endif vst1.32 {q2, q3}, [r5, :128]! +.if \forward + vadd.f32 q7, q11, q14 +.else vsub.f32 q7, q11, q14 +.endif vst1.32 {q4, q5}, [r7, :128]! vst1.32 {q6, q7}, [r12, :128]! bne 1b vpop {q4-q7} pop {r4-r8, pc} +.endm + +.macro neon_static_x8_t, forward=1 + .align 4 - .align 4 +.if \forward #ifdef __APPLE__ .globl _neon_static_x8_t_f _neon_static_x8_t_f: @@ -1527,6 +1067,15 @@ _neon_static_x8_t_f: .globl neon_static_x8_t_f neon_static_x8_t_f: #endif +.else +#ifdef __APPLE__ + .globl _neon_static_x8_t_i +_neon_static_x8_t_i: +#else + .globl neon_static_x8_t_i +neon_static_x8_t_i: +#endif +.endif push {r4-r8, lr} vpush {q4-q7} @@ -1559,10 +1108,17 @@ neon_static_x8_t_f: vld1.32 {q12, q13}, [r3, :128] vsub.f32 q15, q11, q10 vsub.f32 q14, q9, q8 +.if \forward vadd.f32 q4, q12, q15 vsub.f32 q6, q12, q15 vsub.f32 q5, q13, q14 vadd.f32 q7, q13, q14 +.else + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 +.endif vld1.32 {q14, q15}, [r8, :128] vld1.32 {q12, q13}, [r6, :128] vmul.f32 q1, q14, q2 @@ -1593,16 +1149,29 @@ neon_static_x8_t_f: vadd.f32 q0, q11, q2 vadd.f32 q1, q13, q15 vsub.f32 q4, q11, q2 +.if \forward vadd.f32 q2, q8, q10 vsub.f32 q3, q9, q12 +.else + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 +.endif vst2.32 {q0, q1}, [r0, :128]! vsub.f32 q5, q13, q15 vld1.32 {q14, q15}, [r12, :128] +.if \forward vadd.f32 q7, q9, q12 +.else + vsub.f32 q7, q9, q12 +.endif vld1.32 {q12, q13}, [r7, :128] vst2.32 {q2, q3}, [r4, :128]! vld1.32 {q2, q3}, [r2, :128]! +.if \forward vsub.f32 q6, q8, q10 +.else + vadd.f32 q6, q8, q10 +.endif vmul.f32 q8, q14, q2 vst2.32 {q4, q5}, [r6, :128]! vmul.f32 q10, q15, q3 @@ -1625,138 +1194,62 @@ neon_static_x8_t_f: vld1.32 {q10, q11}, [r5, :128] vadd.f32 q0, q8, q13 vadd.f32 q1, q9, q12 +.if \forward vadd.f32 q2, q10, q15 vsub.f32 q3, q11, q14 +.else + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 +.endif vsub.f32 q4, q8, q13 vst2.32 {q0, q1}, [r3, :128]! vsub.f32 q5, q9, q12 +.if \forward vsub.f32 q6, q10, q15 +.else + vadd.f32 q6, q10, q15 +.endif vst2.32 {q2, q3}, [r5, :128]! +.if \forward vadd.f32 q7, q11, q14 +.else + vsub.f32 q7, q11, q14 +.endif vst2.32 {q4, q5}, [r7, :128]! vst2.32 {q6, q7}, [r12, :128]! bne 1b vpop {q4-q7} pop {r4-r8, pc} +.endm - .align 4 -#ifdef __APPLE__ - .globl _neon_static_x8_t_i -_neon_static_x8_t_i: -#else - .globl neon_static_x8_t_i -neon_static_x8_t_i: -#endif - push {r4-r8, lr} - vpush {q4-q7} +# neon_static_e_f +neon_static_e, forward=1 - add r4, r0, r1, lsl #1 @ data2 - add r3, r0, r1 @ data1 - add r6, r4, r1, lsl #1 @ data4 - add r5, r4, r1 @ data3 - add r8, r6, r1, lsl #1 @ data6 - add r7, r6, r1 @ data5 - add r12, r8, r1 @ data7 +# neon_static_e_i +neon_static_e, forward=0 -1: - vld1.32 {q2, q3}, [r2, :128]! - subs r1, r1, #32 - vld1.32 {q14, q15}, [r5, :128] - vmul.f32 q12, q15, q2 - vld1.32 {q10, q11}, [r4, :128] - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vsub.f32 q10, q12, q8 - vld1.32 {q2, q3}, [r2, :128]! 
- vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vsub.f32 q9, q1, q14 - vld1.32 {q12, q13}, [r3, :128] - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 - vadd.f32 q6, q12, q15 - vadd.f32 q5, q13, q14 - vsub.f32 q7, q13, q14 - vld1.32 {q14, q15}, [r8, :128] - vld1.32 {q12, q13}, [r6, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4, q5}, [r3, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6, q7}, [r5, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8, q9}, [r0, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 - vadd.f32 q3, q9, q12 - vst2.32 {q0, q1}, [r0, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14, q15}, [r12,:128] - vsub.f32 q7, q9, q12 - vld1.32 {q12, q13}, [r7, :128] - vst2.32 {q2, q3}, [r4, :128]! - vld1.32 {q2, q3}, [r2, :128]! - vadd.f32 q6, q8, q10 - vmul.f32 q8, q14, q2 - vst2.32 {q4, q5}, [r6, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6, q7}, [r8, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8, q9}, [r3, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10, q11}, [r5, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 - vadd.f32 q3, q11, q14 - vsub.f32 q4, q8, q13 - vst2.32 {q0, q1}, [r3, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 - vst2.32 {q2, q3}, [r5, :128]! - vsub.f32 q7, q11, q14 - vst2.32 {q4, q5}, [r7, :128]! - vst2.32 {q6, q7}, [r12,:128]! 
- bne 1b +# neon_static_o_f +neon_static_o, forward=1 + +# neon_static_o_i +neon_static_o, forward=0 + +# neon_static_x4_f +neon_static_x4, forward=1 + +# neon_static_x4_i +neon_static_x4, forward=0 + +# neon_static_x8_f +neon_static_x8, forward=1 + +# neon_static_x8_i +neon_static_x8, forward=0 + +# neon_static_x8_t_f +neon_static_x8_t, forward=1 + +# neon_static_x8_t_i +neon_static_x8_t, forward=0 - vpop {q4-q7} - pop {r4-r8, pc} -- cgit v1.1 From 2cf68165a461a9faf7069e094436d16c22990aff Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 28 Mar 2016 13:57:47 +0300 Subject: Improve performance of small complex 2D Neon transform by 15% --- src/ffts_nd.c | 69 +-- src/neon.s | 1363 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 714 insertions(+), 718 deletions(-) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 72e21e7..2bde9c4 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -92,74 +92,17 @@ static void ffts_free_nd(ffts_plan_t *p) static void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) { #ifdef HAVE_NEON - size_t i, j, k; - int linebytes = 8 * w; +#if 0 + neon_transpose(in, out, w, h); +#else + size_t i, j; for (j = 0; j < h; j += 8) { for (i = 0; i < w; i += 8) { - neon_transpose_to_buf(in + j*w + i, buf, w); - - uint64_t *p = out + i*h + j; - uint64_t *pbuf = buf; - uint64_t *ptemp; - - __asm__ __volatile__( - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" - "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" - "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" - "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" - "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" - "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" - "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" - "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" - "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" - "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" - "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" - "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" - "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" - "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" - "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" - "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" - "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" - "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" - "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "add %[p], %[p], %[w], lsl #3\n\t" - "vld1.32 {q8,q9}, [%[pbuf], :128]!\n\t" - "vld1.32 {q10,q11}, [%[pbuf], :128]!\n\t" - "vld1.32 {q12,q13}, [%[pbuf], :128]!\n\t" - "vld1.32 {q14,q15}, [%[pbuf], :128]!\n\t" - "vst1.32 {q8,q9}, [%[ptemp], :128]!\n\t" - "vst1.32 {q10,q11}, [%[ptemp], :128]!\n\t" - "mov %[ptemp], %[p]\n\t" - "vst1.32 {q12,q13}, [%[ptemp], :128]!\n\t" - "vst1.32 {q14,q15}, [%[ptemp], :128]!\n\t" - - : [p] "+r" (p), [pbuf] "+r" (pbuf), [ptemp] "+r" (ptemp) - : [w] "r" (w) - : "memory", "q8", "q9", "q10", "q11" - ); - - /* out[i*h + j] = in[j*w + i]; */ + neon_transpose_to_buf(in + j*w + i, out + i*h + j, w); } } +#endif #else #ifdef HAVE_SSE uint64_t FFTS_ALIGN(64) 
tmp[TSIZE*TSIZE]; diff --git a/src/neon.s b/src/neon.s index ec98250..1e7fb92 100644 --- a/src/neon.s +++ b/src/neon.s @@ -1,327 +1,323 @@ /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2016, Jukka Ojanen +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ - .fpu neon + .fpu neon - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_x4 + .globl _neon_x4 _neon_x4: #else - .globl neon_x4 + .globl neon_x4 neon_x4: #endif -@ add r3, r0, #0 - - vld1.32 {q8,q9}, [r0, :128] - add r4, r0, r1, lsl #1 - vld1.32 {q10,q11}, [r4, :128] - add r5, r0, r1, lsl #2 - vld1.32 {q12,q13}, [r5, :128] - add r6, r4, r1, lsl #2 - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q2,q3}, [r2, :128] - - vmul.f32 q0, q13, q3 - vmul.f32 q5, q12, q2 - vmul.f32 q1, q14, q2 - vmul.f32 q4, q14, q3 - vmul.f32 q14, q12, q3 - vmul.f32 q13, q13, q2 - vmul.f32 q12, q15, q3 - vmul.f32 q2, q15, q2 - vsub.f32 q0, q5, q0 - vadd.f32 q13, q13, q14 - vadd.f32 q12, q12, q1 - vsub.f32 q1, q2, q4 - vadd.f32 q15, q0, q12 - vsub.f32 q12, q0, q12 - vadd.f32 q14, q13, q1 - vsub.f32 q13, q13, q1 - vadd.f32 q0, q8, q15 - vadd.f32 q1, q9, q14 - vsub.f32 q2, q10, q13 @ - vsub.f32 q4, q8, q15 - vadd.f32 q3, q11, q12 @ - vst1.32 {q0,q1}, [r0, :128] - vsub.f32 q5, q9, q14 - vadd.f32 q6, q10, q13 @ - vsub.f32 q7, q11, q12 @ - vst1.32 {q2,q3}, [r4, :128] - vst1.32 {q4,q5}, [r5, :128] - vst1.32 {q6,q7}, [r6, :128] - bx lr - - .align 4 + vld1.32 {q8, q9}, [r0, :128] + add r4, r0, r1, lsl #1 + vld1.32 {q10, q11}, [r4, :128] + add r5, r0, r1, lsl #2 + vld1.32 {q12, q13}, [r5, :128] + add r6, r4, r1, lsl #2 + vld1.32 {q14, q15}, [r6, :128] + vld1.32 {q2, q3}, [r2, :128] + + vmul.f32 q0, q13, q3 + vmul.f32 q5, q12, q2 + vmul.f32 q1, q14, q2 + vmul.f32 q4, q14, q3 + vmul.f32 q14, q12, q3 + vmul.f32 q13, q13, q2 + vmul.f32 q12, q15, q3 + vmul.f32 q2, q15, q2 + vsub.f32 q0, q5, q0 + vadd.f32 q13, q13, q14 + vadd.f32 q12, q12, q1 + vsub.f32 q1, q2, q4 + vadd.f32 q15, q0, q12 + vsub.f32 q12, q0, q12 + vadd.f32 q14, q13, q1 + vsub.f32 q13, q13, q1 + vadd.f32 q0, q8, q15 + vadd.f32 q1, q9, q14 + vsub.f32 q2, q10, q13 + vsub.f32 q4, q8, q15 + vadd.f32 q3, q11, q12 + vst1.32 {q0, q1}, [r0, :128] + vsub.f32 q5, q9, q14 + vadd.f32 q6, q10, q13 + vsub.f32 q7, q11, q12 + vst1.32 {q2, q3}, [r4, :128] + vst1.32 {q4, q5}, [r5, :128] + vst1.32 {q6, q7}, [r6, :128] + bx lr + + .align 4 #ifdef __APPLE__ - .globl _neon_x8 + .globl _neon_x8 _neon_x8: #else - .globl neon_x8 + .globl neon_x8 neon_x8: #endif - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_loop: - vld1.32 {q2,q3}, [r12, :128]! - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! 
- vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 @ - vadd.f32 q6, q12, q15 @ - vadd.f32 q5, q13, q14 @ - vsub.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 @ - vadd.f32 q3, q9, q12 @ - vst1.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vsub.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst1.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vadd.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst1.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst1.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 @ - vadd.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst1.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 @ - vst1.32 {q2,q3}, [r6, :128]! - vsub.f32 q7, q11, q14 @ - vst1.32 {q4,q5}, [r8, :128]! - vst1.32 {q6,q7}, [r10, :128]! - bne neon_x8_loop - - bx lr - - .align 4 + mov r11, #0 + add r3, r0, #0 @ data0 + add r5, r0, r1, lsl #1 @ data2 + add r4, r0, r1 @ data1 + add r7, r5, r1, lsl #1 @ data4 + add r6, r5, r1 @ data3 + add r9, r7, r1, lsl #1 @ data6 + add r8, r7, r1 @ data5 + add r10, r9, r1 @ data7 + add r12, r2, #0 @ LUT + + sub r11, r11, r1, lsr #5 +1: + vld1.32 {q2, q3}, [r12, :128]! + vld1.32 {q14, q15}, [r6, :128] + vld1.32 {q10, q11}, [r5, :128] + adds r11, r11, #1 + vmul.f32 q12, q15, q2 + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vld1.32 {q2, q3}, [r12, :128]! 
+ vsub.f32 q10, q12, q8 + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vld1.32 {q12, q13}, [r4, :128] + vsub.f32 q9, q1, q14 + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r9, :128] + vld1.32 {q12, q13}, [r7, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r4, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r6, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r3, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst1.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r10, :128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r8, :128] + vst1.32 {q2, q3}, [r5, :128]! + vld1.32 {q2, q3}, [r12, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst1.32 {q4, q5}, [r7, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst1.32 {q6, q7}, [r9, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r4, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r6, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst1.32 {q0, q1}, [r4, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst1.32 {q2, q3}, [r6, :128]! + vsub.f32 q7, q11, q14 + vst1.32 {q4, q5}, [r8, :128]! + vst1.32 {q6, q7}, [r10, :128]! + bne 1b + bx lr + + .align 4 #ifdef __APPLE__ - .globl _neon_x8_t + .globl _neon_x8_t _neon_x8_t: #else - .globl neon_x8_t + .globl neon_x8_t neon_x8_t: #endif - mov r11, #0 - add r3, r0, #0 @ data0 - add r5, r0, r1, lsl #1 @ data2 - add r4, r0, r1 @ data1 - add r7, r5, r1, lsl #1 @ data4 - add r6, r5, r1 @ data3 - add r9, r7, r1, lsl #1 @ data6 - add r8, r7, r1 @ data5 - add r10, r9, r1 @ data7 - add r12, r2, #0 @ LUT - - sub r11, r11, r1, lsr #5 -neon_x8_t_loop: - vld1.32 {q2,q3}, [r12, :128]! - vld1.32 {q14,q15}, [r6, :128] - vld1.32 {q10,q11}, [r5, :128] - adds r11, r11, #1 - vmul.f32 q12, q15, q2 - vmul.f32 q8, q14, q3 - vmul.f32 q13, q14, q2 - vmul.f32 q9, q10, q3 - vmul.f32 q1, q10, q2 - vmul.f32 q0, q11, q2 - vmul.f32 q14, q11, q3 - vmul.f32 q15, q15, q3 - vld1.32 {q2,q3}, [r12, :128]! 
- vsub.f32 q10, q12, q8 - vadd.f32 q11, q0, q9 - vadd.f32 q8, q15, q13 - vld1.32 {q12,q13}, [r4, :128] - vsub.f32 q9, q1, q14 - vsub.f32 q15, q11, q10 - vsub.f32 q14, q9, q8 - vsub.f32 q4, q12, q15 @ - vadd.f32 q6, q12, q15 @ - vadd.f32 q5, q13, q14 @ - vsub.f32 q7, q13, q14 @ - vld1.32 {q14,q15}, [r9, :128] - vld1.32 {q12,q13}, [r7, :128] - vmul.f32 q1, q14, q2 - vmul.f32 q0, q14, q3 - vst1.32 {q4,q5}, [r4, :128] - vmul.f32 q14, q15, q3 - vmul.f32 q4, q15, q2 - vadd.f32 q15, q9, q8 - vst1.32 {q6,q7}, [r6, :128] - vmul.f32 q8, q12, q3 - vmul.f32 q5, q13, q3 - vmul.f32 q12, q12, q2 - vmul.f32 q9, q13, q2 - vadd.f32 q14, q14, q1 - vsub.f32 q13, q4, q0 - vadd.f32 q0, q9, q8 - vld1.32 {q8,q9}, [r3, :128] - vadd.f32 q1, q11, q10 - vsub.f32 q12, q12, q5 - vadd.f32 q11, q8, q15 - vsub.f32 q8, q8, q15 - vadd.f32 q2, q12, q14 - vsub.f32 q10, q0, q13 - vadd.f32 q15, q0, q13 - vadd.f32 q13, q9, q1 - vsub.f32 q9, q9, q1 - vsub.f32 q12, q12, q14 - vadd.f32 q0, q11, q2 - vadd.f32 q1, q13, q15 - vsub.f32 q4, q11, q2 - vsub.f32 q2, q8, q10 @ - vadd.f32 q3, q9, q12 @ - vst2.32 {q0,q1}, [r3, :128]! - vsub.f32 q5, q13, q15 - vld1.32 {q14,q15}, [r10, :128] - vsub.f32 q7, q9, q12 @ - vld1.32 {q12,q13}, [r8, :128] - vst2.32 {q2,q3}, [r5, :128]! - vld1.32 {q2,q3}, [r12, :128]! - vadd.f32 q6, q8, q10 @ - vmul.f32 q8, q14, q2 - vst2.32 {q4,q5}, [r7, :128]! - vmul.f32 q10, q15, q3 - vmul.f32 q9, q13, q3 - vmul.f32 q11, q12, q2 - vmul.f32 q14, q14, q3 - vst2.32 {q6,q7}, [r9, :128]! - vmul.f32 q15, q15, q2 - vmul.f32 q12, q12, q3 - vmul.f32 q13, q13, q2 - vadd.f32 q10, q10, q8 - vsub.f32 q11, q11, q9 - vld1.32 {q8,q9}, [r4, :128] - vsub.f32 q14, q15, q14 - vadd.f32 q15, q13, q12 - vadd.f32 q13, q11, q10 - vadd.f32 q12, q15, q14 - vsub.f32 q15, q15, q14 - vsub.f32 q14, q11, q10 - vld1.32 {q10,q11}, [r6, :128] - vadd.f32 q0, q8, q13 - vadd.f32 q1, q9, q12 - vsub.f32 q2, q10, q15 @ - vadd.f32 q3, q11, q14 @ - vsub.f32 q4, q8, q13 - vst2.32 {q0,q1}, [r4, :128]! - vsub.f32 q5, q9, q12 - vadd.f32 q6, q10, q15 @ - vst2.32 {q2,q3}, [r6, :128]! - vsub.f32 q7, q11, q14 @ - vst2.32 {q4,q5}, [r8, :128]! - vst2.32 {q6,q7}, [r10, :128]! - bne neon_x8_t_loop - - @bx lr + mov r11, #0 + add r3, r0, #0 @ data0 + add r5, r0, r1, lsl #1 @ data2 + add r4, r0, r1 @ data1 + add r7, r5, r1, lsl #1 @ data4 + add r6, r5, r1 @ data3 + add r9, r7, r1, lsl #1 @ data6 + add r8, r7, r1 @ data5 + add r10, r9, r1 @ data7 + add r12, r2, #0 @ LUT + + sub r11, r11, r1, lsr #5 +1: + vld1.32 {q2, q3}, [r12, :128]! + vld1.32 {q14, q15}, [r6, :128] + vld1.32 {q10, q11}, [r5, :128] + adds r11, r11, #1 + vmul.f32 q12, q15, q2 + vmul.f32 q8, q14, q3 + vmul.f32 q13, q14, q2 + vmul.f32 q9, q10, q3 + vmul.f32 q1, q10, q2 + vmul.f32 q0, q11, q2 + vmul.f32 q14, q11, q3 + vmul.f32 q15, q15, q3 + vld1.32 {q2, q3}, [r12, :128]! 
+ vsub.f32 q10, q12, q8 + vadd.f32 q11, q0, q9 + vadd.f32 q8, q15, q13 + vld1.32 {q12, q13}, [r4, :128] + vsub.f32 q9, q1, q14 + vsub.f32 q15, q11, q10 + vsub.f32 q14, q9, q8 + vsub.f32 q4, q12, q15 + vadd.f32 q6, q12, q15 + vadd.f32 q5, q13, q14 + vsub.f32 q7, q13, q14 + vld1.32 {q14, q15}, [r9, :128] + vld1.32 {q12, q13}, [r7, :128] + vmul.f32 q1, q14, q2 + vmul.f32 q0, q14, q3 + vst1.32 {q4, q5}, [r4, :128] + vmul.f32 q14, q15, q3 + vmul.f32 q4, q15, q2 + vadd.f32 q15, q9, q8 + vst1.32 {q6, q7}, [r6, :128] + vmul.f32 q8, q12, q3 + vmul.f32 q5, q13, q3 + vmul.f32 q12, q12, q2 + vmul.f32 q9, q13, q2 + vadd.f32 q14, q14, q1 + vsub.f32 q13, q4, q0 + vadd.f32 q0, q9, q8 + vld1.32 {q8, q9}, [r3, :128] + vadd.f32 q1, q11, q10 + vsub.f32 q12, q12, q5 + vadd.f32 q11, q8, q15 + vsub.f32 q8, q8, q15 + vadd.f32 q2, q12, q14 + vsub.f32 q10, q0, q13 + vadd.f32 q15, q0, q13 + vadd.f32 q13, q9, q1 + vsub.f32 q9, q9, q1 + vsub.f32 q12, q12, q14 + vadd.f32 q0, q11, q2 + vadd.f32 q1, q13, q15 + vsub.f32 q4, q11, q2 + vsub.f32 q2, q8, q10 + vadd.f32 q3, q9, q12 + vst2.32 {q0, q1}, [r3, :128]! + vsub.f32 q5, q13, q15 + vld1.32 {q14, q15}, [r10, :128] + vsub.f32 q7, q9, q12 + vld1.32 {q12, q13}, [r8, :128] + vst2.32 {q2, q3}, [r5, :128]! + vld1.32 {q2, q3}, [r12, :128]! + vadd.f32 q6, q8, q10 + vmul.f32 q8, q14, q2 + vst2.32 {q4, q5}, [r7, :128]! + vmul.f32 q10, q15, q3 + vmul.f32 q9, q13, q3 + vmul.f32 q11, q12, q2 + vmul.f32 q14, q14, q3 + vst2.32 {q6, q7}, [r9, :128]! + vmul.f32 q15, q15, q2 + vmul.f32 q12, q12, q3 + vmul.f32 q13, q13, q2 + vadd.f32 q10, q10, q8 + vsub.f32 q11, q11, q9 + vld1.32 {q8, q9}, [r4, :128] + vsub.f32 q14, q15, q14 + vadd.f32 q15, q13, q12 + vadd.f32 q13, q11, q10 + vadd.f32 q12, q15, q14 + vsub.f32 q15, q15, q14 + vsub.f32 q14, q11, q10 + vld1.32 {q10, q11}, [r6, :128] + vadd.f32 q0, q8, q13 + vadd.f32 q1, q9, q12 + vsub.f32 q2, q10, q15 + vadd.f32 q3, q11, q14 + vsub.f32 q4, q8, q13 + vst2.32 {q0, q1}, [r4, :128]! + vsub.f32 q5, q9, q12 + vadd.f32 q6, q10, q15 + vst2.32 {q2, q3}, [r6, :128]! + vsub.f32 q7, q11, q14 + vst2.32 {q4, q5}, [r8, :128]! + vst2.32 {q6, q7}, [r10, :128]! + bne 1b @ assumes r0 = out @ r1 = in ? @@ -330,80 +326,80 @@ neon_x8_t_loop: @ r3-r10 = data pointers @ r11 = loop iterations @ r2 & lr = temps - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_ee + .globl _neon_ee _neon_ee: #else - .globl neon_ee + .globl neon_ee neon_ee: #endif - vld1.32 {d16, d17}, [r2, :128] -_neon_ee_loop: - vld2.32 {q15}, [r10, :128]! - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! - vld2.32 {q9}, [r4, :128]! - vld2.32 {q10}, [r3, :128]! - vld2.32 {q11}, [r6, :128]! - vld2.32 {q12}, [r5, :128]! - vsub.f32 q1, q14, q13 - vld2.32 {q0}, [r9, :128]! 
- subs r11, r11, #1 - vsub.f32 q2, q0, q15 - vadd.f32 q0, q0, q15 - vmul.f32 d10, d2, d17 - vmul.f32 d11, d3, d16 - vmul.f32 d12, d3, d17 - vmul.f32 d6, d4, d17 - vmul.f32 d7, d5, d16 - vmul.f32 d8, d4, d16 - vmul.f32 d9, d5, d17 - vmul.f32 d13, d2, d16 - vsub.f32 d7, d7, d6 - vadd.f32 d11, d11, d10 - vsub.f32 q1, q12, q11 - vsub.f32 q2, q10, q9 - vadd.f32 d6, d9, d8 - vadd.f32 q4, q14, q13 - vadd.f32 q11, q12, q11 - vadd.f32 q12, q10, q9 - vsub.f32 d10, d13, d12 - vsub.f32 q7, q4, q0 - vsub.f32 q9, q12, q11 - vsub.f32 q13, q5, q3 - vadd.f32 d29, d5, d2 @ - vadd.f32 q5, q5, q3 - vadd.f32 q10, q4, q0 - vadd.f32 q11, q12, q11 - vsub.f32 d31, d5, d2 @ - vsub.f32 d28, d4, d3 @ - vadd.f32 d30, d4, d3 @ - vadd.f32 d5, d19, d14 @- - vadd.f32 d7, d31, d26 @- - vadd.f32 q1, q14, q5 - vadd.f32 q0, q11, q10 - vsub.f32 d6, d30, d27 @- - vsub.f32 d4, d18, d15 @- - vsub.f32 d13, d19, d14 @- - vadd.f32 d12, d18, d15 @- - vsub.f32 d15, d31, d26 @- - ldr r2, [r12], #4 - vtrn.32 q1, q3 - ldr lr, [r12], #4 - vtrn.32 q0, q2 - add r2, r0, r2, lsl #2 - vsub.f32 q4, q11, q10 - add lr, r0, lr, lsl #2 - vsub.f32 q5, q14, q5 - vadd.f32 d14, d30, d27 @- - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_ee_loop + vld1.32 {d16, d17}, [r2, :128] +1: + vld2.32 {q15}, [r10, :128]! + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vld2.32 {q9}, [r4, :128]! + vld2.32 {q10}, [r3, :128]! + vld2.32 {q11}, [r6, :128]! + vld2.32 {q12}, [r5, :128]! + vsub.f32 q1, q14, q13 + vld2.32 {q0}, [r9, :128]! + subs r11, r11, #1 + vsub.f32 q2, q0, q15 + vadd.f32 q0, q0, q15 + vmul.f32 d10, d2, d17 + vmul.f32 d11, d3, d16 + vmul.f32 d12, d3, d17 + vmul.f32 d6, d4, d17 + vmul.f32 d7, d5, d16 + vmul.f32 d8, d4, d16 + vmul.f32 d9, d5, d17 + vmul.f32 d13, d2, d16 + vsub.f32 d7, d7, d6 + vadd.f32 d11, d11, d10 + vsub.f32 q1, q12, q11 + vsub.f32 q2, q10, q9 + vadd.f32 d6, d9, d8 + vadd.f32 q4, q14, q13 + vadd.f32 q11, q12, q11 + vadd.f32 q12, q10, q9 + vsub.f32 d10, d13, d12 + vsub.f32 q7, q4, q0 + vsub.f32 q9, q12, q11 + vsub.f32 q13, q5, q3 + vadd.f32 d29, d5, d2 + vadd.f32 q5, q5, q3 + vadd.f32 q10, q4, q0 + vadd.f32 q11, q12, q11 + vsub.f32 d31, d5, d2 + vsub.f32 d28, d4, d3 + vadd.f32 d30, d4, d3 + vadd.f32 d5, d19, d14 + vadd.f32 d7, d31, d26 + vadd.f32 q1, q14, q5 + vadd.f32 q0, q11, q10 + vsub.f32 d6, d30, d27 + vsub.f32 d4, d18, d15 + vsub.f32 d13, d19, d14 + vadd.f32 d12, d18, d15 + vsub.f32 d15, d31, d26 + ldr r2, [r12], #4 + vtrn.32 q1, q3 + ldr lr, [r12], #4 + vtrn.32 q0, q2 + add r2, r0, r2, lsl #2 + vsub.f32 q4, q11, q10 + add lr, r0, lr, lsl #2 + vsub.f32 q5, q14, q5 + vadd.f32 d14, d30, d27 + vst2.32 {q0, q1}, [r2, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r2, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne 1b @ assumes r0 = out @ @@ -411,57 +407,57 @@ _neon_ee_loop: @ r3-r10 = data pointers @ r11 = loop iterations @ r2 & lr = temps - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_oo + .globl _neon_oo _neon_oo: #else - .globl neon_oo + .globl neon_oo neon_oo: #endif -_neon_oo_loop: - vld2.32 {q8}, [r6, :128]! - vld2.32 {q9}, [r5, :128]! - vld2.32 {q10}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vadd.f32 q11, q9, q8 - vsub.f32 q8, q9, q8 - vsub.f32 q9, q13, q10 - vadd.f32 q12, q13, q10 - subs r11, r11, #1 - vld2.32 {q10}, [r7, :128]! - vld2.32 {q13}, [r9, :128]! 
- vsub.f32 q2, q12, q11 - vsub.f32 d7, d19, d16 @ - vadd.f32 d3, d19, d16 @ - vadd.f32 d6, d18, d17 @ - vsub.f32 d2, d18, d17 @ - vld2.32 {q9}, [r8, :128]! - vld2.32 {q8}, [r10, :128]! - vadd.f32 q0, q12, q11 - vadd.f32 q11, q13, q8 - vadd.f32 q12, q10, q9 - vsub.f32 q8, q13, q8 - vsub.f32 q9, q10, q9 - vsub.f32 q6, q12, q11 - vadd.f32 q4, q12, q11 - vtrn.32 q0, q2 - ldr r2, [r12], #4 - vsub.f32 d15, d19, d16 @ - ldr lr, [r12], #4 - vadd.f32 d11, d19, d16 @ - vadd.f32 d14, d18, d17 @ - vsub.f32 d10, d18, d17 @ - add r2, r0, r2, lsl #2 - vtrn.32 q1, q3 - add lr, r0, lr, lsl #2 - vst2.32 {q0,q1}, [r2, :128]! - vst2.32 {q2,q3}, [lr, :128]! - vtrn.32 q4, q6 - vtrn.32 q5, q7 - vst2.32 {q4,q5}, [r2, :128]! - vst2.32 {q6,q7}, [lr, :128]! - bne _neon_oo_loop +1: + vld2.32 {q8}, [r6, :128]! + vld2.32 {q9}, [r5, :128]! + vld2.32 {q10}, [r4, :128]! + vld2.32 {q13}, [r3, :128]! + vadd.f32 q11, q9, q8 + vsub.f32 q8, q9, q8 + vsub.f32 q9, q13, q10 + vadd.f32 q12, q13, q10 + subs r11, r11, #1 + vld2.32 {q10}, [r7, :128]! + vld2.32 {q13}, [r9, :128]! + vsub.f32 q2, q12, q11 + vsub.f32 d7, d19, d16 + vadd.f32 d3, d19, d16 + vadd.f32 d6, d18, d17 + vsub.f32 d2, d18, d17 + vld2.32 {q9}, [r8, :128]! + vld2.32 {q8}, [r10, :128]! + vadd.f32 q0, q12, q11 + vadd.f32 q11, q13, q8 + vadd.f32 q12, q10, q9 + vsub.f32 q8, q13, q8 + vsub.f32 q9, q10, q9 + vsub.f32 q6, q12, q11 + vadd.f32 q4, q12, q11 + vtrn.32 q0, q2 + ldr r2, [r12], #4 + vsub.f32 d15, d19, d16 + ldr lr, [r12], #4 + vadd.f32 d11, d19, d16 + vadd.f32 d14, d18, d17 + vsub.f32 d10, d18, d17 + add r2, r0, r2, lsl #2 + vtrn.32 q1, q3 + add lr, r0, lr, lsl #2 + vst2.32 {q0, q1}, [r2, :128]! + vst2.32 {q2, q3}, [lr, :128]! + vtrn.32 q4, q6 + vtrn.32 q5, q7 + vst2.32 {q4, q5}, [r2, :128]! + vst2.32 {q6, q7}, [lr, :128]! + bne 1b @ assumes r0 = out @ @@ -469,81 +465,80 @@ _neon_oo_loop: @ r3-r10 = data pointers @ r11 = addr of twiddle @ r2 & lr = temps - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_eo + .globl _neon_eo _neon_eo: #else - .globl neon_eo + .globl neon_eo neon_eo: #endif - vld2.32 {q9}, [r5, :128]! @tag2 - vld2.32 {q13}, [r3, :128]! @tag0 - vld2.32 {q12}, [r4, :128]! @tag1 - vld2.32 {q0}, [r7, :128]! @tag4 - vsub.f32 q11, q13, q12 - vld2.32 {q8}, [r6, :128]! @tag3 - vadd.f32 q12, q13, q12 - vsub.f32 q10, q9, q8 - vadd.f32 q8, q9, q8 - vadd.f32 q9, q12, q8 - vadd.f32 d9, d23, d20 @ - vsub.f32 d11, d23, d20 @ - vsub.f32 q8, q12, q8 - vsub.f32 d8, d22, d21 @ - vadd.f32 d10, d22, d21 @ - ldr r2, [r12], #4 - vld1.32 {d20, d21}, [r11, :128] - ldr lr, [r12], #4 - vtrn.32 q9, q4 - add r2, r0, r2, lsl #2 - vtrn.32 q8, q5 - add lr, r0, lr, lsl #2 - vswp d9,d10 - vst1.32 {d8,d9,d10,d11}, [lr, :128]! - vld2.32 {q13}, [r10, :128]! @tag7 - vld2.32 {q15}, [r9, :128]! @tag6 - vld2.32 {q11}, [r8, :128]! @tag5 - vsub.f32 q14, q15, q13 - vsub.f32 q12, q0, q11 - vadd.f32 q11, q0, q11 - vadd.f32 q13, q15, q13 - vadd.f32 d13, d29, d24 @ - vadd.f32 q15, q13, q11 - vsub.f32 d12, d28, d25 @ - vsub.f32 d15, d29, d24 @ - vadd.f32 d14, d28, d25 @ - vtrn.32 q15, q6 - vsub.f32 q15, q13, q11 - vtrn.32 q15, q7 - vswp d13, d14 - vst1.32 {d12,d13,d14,d15}, [lr, :128]! 
- vtrn.32 q13, q14 - vtrn.32 q11, q12 - vmul.f32 d24, d26, d21 - vmul.f32 d28, d27, d20 - vmul.f32 d25, d26, d20 - vmul.f32 d26, d27, d21 - vmul.f32 d27, d22, d21 - vmul.f32 d30, d23, d20 - vmul.f32 d29, d23, d21 - vmul.f32 d22, d22, d20 - vsub.f32 d21, d28, d24 - vadd.f32 d20, d26, d25 - vadd.f32 d25, d30, d27 - vsub.f32 d24, d22, d29 - vadd.f32 q11, q12, q10 - vsub.f32 q10, q12, q10 - vadd.f32 q0, q9, q11 - vsub.f32 q2, q9, q11 - vadd.f32 d3, d17, d20 @ - vsub.f32 d7, d17, d20 @ - vsub.f32 d2, d16, d21 @ - vadd.f32 d6, d16, d21 @ - vswp d1, d2 - vswp d5, d6 - vstmia r2!, {q0-q3} - + vld2.32 {q9}, [r5, :128]! + vld2.32 {q13}, [r3, :128]! + vld2.32 {q12}, [r4, :128]! + vld2.32 {q0}, [r7, :128]! + vsub.f32 q11, q13, q12 + vld2.32 {q8}, [r6, :128]! + vadd.f32 q12, q13, q12 + vsub.f32 q10, q9, q8 + vadd.f32 q8, q9, q8 + vadd.f32 q9, q12, q8 + vadd.f32 d9, d23, d20 + vsub.f32 d11, d23, d20 + vsub.f32 q8, q12, q8 + vsub.f32 d8, d22, d21 + vadd.f32 d10, d22, d21 + ldr r2, [r12], #4 + vld1.32 {d20, d21}, [r11, :128] + ldr lr, [r12], #4 + vtrn.32 q9, q4 + add r2, r0, r2, lsl #2 + vtrn.32 q8, q5 + add lr, r0, lr, lsl #2 + vswp d9, d10 + vst1.32 {d8, d9, d10, d11}, [lr, :128]! + vld2.32 {q13}, [r10, :128]! + vld2.32 {q15}, [r9, :128]! + vld2.32 {q11}, [r8, :128]! + vsub.f32 q14, q15, q13 + vsub.f32 q12, q0, q11 + vadd.f32 q11, q0, q11 + vadd.f32 q13, q15, q13 + vadd.f32 d13, d29, d24 + vadd.f32 q15, q13, q11 + vsub.f32 d12, d28, d25 + vsub.f32 d15, d29, d24 + vadd.f32 d14, d28, d25 + vtrn.32 q15, q6 + vsub.f32 q15, q13, q11 + vtrn.32 q15, q7 + vswp d13, d14 + vst1.32 {d12, d13, d14, d15}, [lr, :128]! + vtrn.32 q13, q14 + vtrn.32 q11, q12 + vmul.f32 d24, d26, d21 + vmul.f32 d28, d27, d20 + vmul.f32 d25, d26, d20 + vmul.f32 d26, d27, d21 + vmul.f32 d27, d22, d21 + vmul.f32 d30, d23, d20 + vmul.f32 d29, d23, d21 + vmul.f32 d22, d22, d20 + vsub.f32 d21, d28, d24 + vadd.f32 d20, d26, d25 + vadd.f32 d25, d30, d27 + vsub.f32 d24, d22, d29 + vadd.f32 q11, q12, q10 + vsub.f32 q10, q12, q10 + vadd.f32 q0, q9, q11 + vsub.f32 q2, q9, q11 + vadd.f32 d3, d17, d20 + vsub.f32 d7, d17, d20 + vsub.f32 d2, d16, d21 + vadd.f32 d6, d16, d21 + vswp d1, d2 + vswp d5, d6 + vstmia r2!, {q0-q3} @ assumes r0 = out @ @@ -551,189 +546,247 @@ neon_eo: @ r3-r10 = data pointers @ r11 = addr of twiddle @ r2 & lr = temps - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_oe + .globl _neon_oe _neon_oe: #else - .globl neon_oe + .globl neon_oe neon_oe: #endif - vld1.32 {q8}, [r5, :128]! - vld1.32 {q10}, [r6, :128]! - vld2.32 {q11}, [r4, :128]! - vld2.32 {q13}, [r3, :128]! - vld2.32 {q15}, [r10, :128]! - vorr d25, d17, d17 - vorr d24, d20, d20 - vorr d20, d16, d16 - vsub.f32 q9, q13, q11 - vadd.f32 q11, q13, q11 - ldr r2, [r12], #4 - vtrn.32 d24, d25 - ldr lr, [r12], #4 - vtrn.32 d20, d21 - add r2, r0, r2, lsl #2 - vsub.f32 q8, q10, q12 - add lr, r0, lr, lsl #2 - vadd.f32 q10, q10, q12 - vadd.f32 q0, q11, q10 - vadd.f32 d25, d19, d16 @ - vsub.f32 d27, d19, d16 @ - vsub.f32 q1, q11, q10 - vsub.f32 d24, d18, d17 @ - vadd.f32 d26, d18, d17 @ - vtrn.32 q0, q12 - vtrn.32 q1, q13 - vld1.32 {d24, d25}, [r11, :128] - vswp d1, d2 - vst1.32 {q0, q1}, [r2, :128]! - vld2.32 {q0}, [r9, :128]! - vadd.f32 q1, q0, q15 - vld2.32 {q13}, [r8, :128]! - vld2.32 {q14}, [r7, :128]! 
- vsub.f32 q15, q0, q15 - vsub.f32 q0, q14, q13 - vadd.f32 q3, q14, q13 - vadd.f32 q2, q3, q1 - vadd.f32 d29, d1, d30 @ - vsub.f32 d27, d1, d30 @ - vsub.f32 q3, q3, q1 - vsub.f32 d28, d0, d31 @ - vadd.f32 d26, d0, d31 @ - vtrn.32 q2, q14 - vtrn.32 q3, q13 - vswp d5, d6 - vst1.32 {q2, q3}, [r2, :128]! - vtrn.32 q11, q9 - vtrn.32 q10, q8 - vmul.f32 d20, d18, d25 - vmul.f32 d22, d19, d24 - vmul.f32 d21, d19, d25 - vmul.f32 d18, d18, d24 - vmul.f32 d19, d16, d25 - vmul.f32 d30, d17, d24 - vmul.f32 d23, d16, d24 - vmul.f32 d24, d17, d25 - vadd.f32 d17, d22, d20 - vsub.f32 d16, d18, d21 - vsub.f32 d21, d30, d19 - vadd.f32 d20, d24, d23 - vadd.f32 q9, q8, q10 - vsub.f32 q8, q8, q10 - vadd.f32 q4, q14, q9 - vsub.f32 q6, q14, q9 - vadd.f32 d11, d27, d16 @ - vsub.f32 d15, d27, d16 @ - vsub.f32 d10, d26, d17 @ - vadd.f32 d14, d26, d17 @ - vswp d9, d10 - vswp d13, d14 - vstmia lr!, {q4-q7} - - - .align 4 + vld1.32 {q8}, [r5, :128]! + vld1.32 {q10}, [r6, :128]! + vld2.32 {q11}, [r4, :128]! + vld2.32 {q13}, [r3, :128]! + vld2.32 {q15}, [r10, :128]! + vorr d25, d17, d17 + vorr d24, d20, d20 + vorr d20, d16, d16 + vsub.f32 q9, q13, q11 + vadd.f32 q11, q13, q11 + ldr r2, [r12], #4 + vtrn.32 d24, d25 + ldr lr, [r12], #4 + vtrn.32 d20, d21 + add r2, r0, r2, lsl #2 + vsub.f32 q8, q10, q12 + add lr, r0, lr, lsl #2 + vadd.f32 q10, q10, q12 + vadd.f32 q0, q11, q10 + vadd.f32 d25, d19, d16 + vsub.f32 d27, d19, d16 + vsub.f32 q1, q11, q10 + vsub.f32 d24, d18, d17 + vadd.f32 d26, d18, d17 + vtrn.32 q0, q12 + vtrn.32 q1, q13 + vld1.32 {d24, d25}, [r11, :128] + vswp d1, d2 + vst1.32 {q0, q1}, [r2, :128]! + vld2.32 {q0}, [r9, :128]! + vadd.f32 q1, q0, q15 + vld2.32 {q13}, [r8, :128]! + vld2.32 {q14}, [r7, :128]! + vsub.f32 q15, q0, q15 + vsub.f32 q0, q14, q13 + vadd.f32 q3, q14, q13 + vadd.f32 q2, q3, q1 + vadd.f32 d29, d1, d30 + vsub.f32 d27, d1, d30 + vsub.f32 q3, q3, q1 + vsub.f32 d28, d0, d31 + vadd.f32 d26, d0, d31 + vtrn.32 q2, q14 + vtrn.32 q3, q13 + vswp d5, d6 + vst1.32 {q2, q3}, [r2, :128]! + vtrn.32 q11, q9 + vtrn.32 q10, q8 + vmul.f32 d20, d18, d25 + vmul.f32 d22, d19, d24 + vmul.f32 d21, d19, d25 + vmul.f32 d18, d18, d24 + vmul.f32 d19, d16, d25 + vmul.f32 d30, d17, d24 + vmul.f32 d23, d16, d24 + vmul.f32 d24, d17, d25 + vadd.f32 d17, d22, d20 + vsub.f32 d16, d18, d21 + vsub.f32 d21, d30, d19 + vadd.f32 d20, d24, d23 + vadd.f32 q9, q8, q10 + vsub.f32 q8, q8, q10 + vadd.f32 q4, q14, q9 + vsub.f32 q6, q14, q9 + vadd.f32 d11, d27, d16 + vsub.f32 d15, d27, d16 + vsub.f32 d10, d26, d17 + vadd.f32 d14, d26, d17 + vswp d9, d10 + vswp d13, d14 + vstmia lr!, {q4-q7} + + .align 4 #ifdef __APPLE__ - .globl _neon_end + .globl _neon_end _neon_end: #else - .globl neon_end + .globl neon_end neon_end: #endif - bx lr + bx lr - - .align 4 + .align 4 #ifdef __APPLE__ - .globl _neon_transpose + .globl _neon_transpose _neon_transpose: #else - .globl neon_transpose + .globl neon_transpose neon_transpose: #endif - push {r4-r8} - @ vpush {q8-q9} - mov r5, r3 -_neon_transpose_col: - mov r7, r1 - add r8, r1, r3, lsl #3 - mov r4, r2 - add r6, r0, r2, lsl #3 -_neon_transpose_row: - vld1.32 {q8,q9}, [r0, :128]! -@ vld1.32 {q10,q11}, [r0, :128]! - vld1.32 {q12,q13}, [r6, :128]! -@ vld1.32 {q14,q15}, [r6, :128]! 
- sub r4, r4, #4 - cmp r4, #0 - vswp d17,d24 - vswp d19,d26 - vswp d21,d28 - vswp d23,d30 - vst1.32 {q8}, [r7, :128] - vst1.32 {q12}, [r8, :128] - add r7, r7, r3, lsl #4 - add r8, r8, r3, lsl #4 - vst1.32 {q9}, [r7, :128] - vst1.32 {q13}, [r8, :128] - add r7, r7, r3, lsl #4 - add r8, r8, r3, lsl #4 -@@vst1.32 {q10}, [r7, :128] -@@vst1.32 {q14}, [r8, :128] -@@add r7, r7, r3, lsl #4 -@@add r8, r8, r3, lsl #4 -@@vst1.32 {q11}, [r7, :128] -@@vst1.32 {q15}, [r8, :128] -@@add r7, r7, r3, lsl #4 -@@add r8, r8, r3, lsl #4 - bne _neon_transpose_row - sub r5, r5, #2 - cmp r5, #0 - add r0, r0, r2, lsl #3 - add r1, r1, #16 - bne _neon_transpose_col - @ vpop {q8-q9} - pop {r4-r8} - bx lr - - .align 4 + push {r4-r6, lr} + mov r5, r3 +1: + mov ip, r1 + add lr, r1, r3, lsl #3 + mov r4, r2 + add r6, r0, r2, lsl #3 +2: + vld1.32 {q8, q9}, [r0, :128]! + vld1.32 {q12,q13}, [r6, :128]! + subs r4, r4, #4 + vswp d17, d24 + vswp d19, d26 + vswp d21, d28 + vswp d23, d30 + vst1.32 {q8}, [ip, :128] + vst1.32 {q12}, [lr, :128] + add ip, ip, r3, lsl #4 + add lr, lr, r3, lsl #4 + vst1.32 {q9}, [ip, :128] + vst1.32 {q13}, [lr, :128] + add ip, ip, r3, lsl #4 + add lr, lr, r3, lsl #4 + bne 2b + subs r5, r5, #2 + add r0, r0, r2, lsl #3 + add r1, r1, #16 + bne 1b + pop {r4-r6, pc} + + .align 4 #ifdef __APPLE__ - .globl _neon_transpose_to_buf + .globl _neon_transpose_to_buf _neon_transpose_to_buf: #else - .globl neon_transpose_to_buf + .globl neon_transpose_to_buf neon_transpose_to_buf: #endif - push {r4-r10} - mov r5, #8 -_neon_transpose_to_buf_col: - mov r4, #8 - add r6, r0, r2, lsl #3 - mov r7, r1 - add r8, r1, #64 - add r9, r1, #128 - add r10, r1, #192 -_neon_transpose_to_buf_row: - vld1.32 {q8,q9}, [r0, :128]! - vld1.32 {q12,q13}, [r6, :128]! - sub r4, r4, #4 - cmp r4, #0 - vswp d17,d24 - vswp d19,d26 - vst1.32 {q8}, [r7, :128] - vst1.32 {q12}, [r8, :128] - vst1.32 {q9}, [r9, :128] - vst1.32 {q13}, [r10, :128] - add r7, r7, #256 - add r8, r8, #256 - add r9, r9, #256 - add r10, r10, #256 - bne _neon_transpose_to_buf_row - sub r5, r5, #2 - cmp r5, #0 - sub r0, r0, #64 - add r0, r0, r2, lsl #4 - add r1, r1, #16 - bne _neon_transpose_to_buf_col - pop {r4-r10} - bx lr + push {r4-r8, lr} + vpush {q4-q7} + + @ initialize and preload (TODO: optimize) + pld [r0] + add r4, r0, r2, lsl #3 + lsl ip, r2, #4 + pld [r4] + pld [r0, ip] + add r6, r1, r2, lsl #3 + pld [r4, ip] + add r7, r6, r2, lsl #3 + pld [r0, ip, lsl #1] + pld [r0, ip, lsl #2] + add r8, r7, r2, lsl #3 + pld [r4, ip, lsl #1] + pld [r4, ip, lsl #2] + sub ip, ip, #32 + lsl r3, ip, #1 + add r3, r3, #16 + + @ matrix 0&2 row 0-1 + vld1.32 {q0, q1}, [r0, :128]! + vld1.32 {q2, q3}, [r4, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r6, :128]! + vst1.32 {q1}, [r7, :128]! + vst1.32 {q3}, [r8, :128]! + + @ matrix 1&3 row 0-1 + vld1.32 {q4, q5}, [r0, :128], ip + vld1.32 {q6, q7}, [r4, :128], ip + vswp d9, d12 + vswp d11, d14 + + @ matrix 0&2, row 2-3 + vld1.32 {q0, q1}, [r0, :128]! + vld1.32 {q2, q3}, [r4, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r6, :128]! + vst1.32 {q1}, [r7, :128]! + vst1.32 {q3}, [r8, :128]! + + @ matrix 1&3, row 2-3 + vld1.32 {q8, q9}, [r0, :128], ip + vld1.32 {q10, q11}, [r4, :128], ip + vswp d17, d20 + vswp d19, d22 + + @ matrix 0&2, row 4-5 + vld1.32 {q0, q1}, [r0, :128]! + vld1.32 {q2, q3}, [r4, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r6, :128]! + vst1.32 {q1}, [r7, :128]! + vst1.32 {q3}, [r8, :128]! 
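
(Aside: each vld1/vswp/vst1 group in this routine transposes a 2x2 block of 64-bit elements, one interleaved complex float per element; vswp exchanges the off-diagonal d-register halves of a register pair, and eight such groups cover the 8x8 tile. A minimal C model of what one tile computes is sketched below, assuming row-major input and output with the given strides; the function name and parameters are illustrative, not taken from the FFTS sources.)

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: 8x8 tile transpose of interleaved complex floats (one
     * uint64_t each). The 2x2 inner block mirrors a d-register vswp
     * pair; the outer loops mirror the "matrix M, row N" groups above.
     * Illustrative only -- not taken from the FFTS sources. */
    static void transpose_tile8(const uint64_t *in, size_t in_stride,
                                uint64_t *out, size_t out_stride)
    {
        size_t i, j;

        for (i = 0; i < 8; i += 2) {
            for (j = 0; j < 8; j += 2) {
                out[(j + 0) * out_stride + (i + 0)] = in[(i + 0) * in_stride + (j + 0)];
                out[(j + 1) * out_stride + (i + 0)] = in[(i + 0) * in_stride + (j + 1)]; /* vswp lane */
                out[(j + 0) * out_stride + (i + 1)] = in[(i + 1) * in_stride + (j + 0)]; /* vswp lane */
                out[(j + 1) * out_stride + (i + 1)] = in[(i + 1) * in_stride + (j + 1)];
            }
        }
    }

(Working in 2x2 blocks keeps every load and store a full 128-bit aligned access, which is presumably why the NEON path avoids element-by-element moves.)
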
+ + @ matrix 1&3, row 4-5 + vld1.32 {q12, q13}, [r0, :128], ip + vld1.32 {q14, q15}, [r4, :128], ip + vswp d25, d28 + vswp d27, d30 + + @ matrix 0&2, row 6-7 + vld1.32 {q0, q1}, [r0, :128]! + vld1.32 {q2, q3}, [r4, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128], r3 + vst1.32 {q2}, [r6, :128], r3 + vst1.32 {q1}, [r7, :128], r3 + vst1.32 {q3}, [r8, :128], r3 + + @ matrix 1&3, row 6-7 + vld1.32 {q0, q1}, [r0, :128] + vld1.32 {q2, q3}, [r4, :128] + vswp d1, d4 + vswp d3, d6 + + @ these could be replaced with VSTM, but that requires swaps + vst1.32 {q4}, [r1, :128]! + vst1.32 {q8}, [r1, :128]! + vst1.32 {q12}, [r1, :128]! + vst1.32 {q0}, [r1, :128] + + vst1.32 {q6}, [r6, :128]! + vst1.32 {q10}, [r6, :128]! + vst1.32 {q14}, [r6, :128]! + vst1.32 {q2}, [r6, :128] + + vst1.32 {q5}, [r7, :128]! + vst1.32 {q9}, [r7, :128]! + vst1.32 {q13}, [r7, :128]! + vst1.32 {q1}, [r7, :128] + + vst1.32 {q7}, [r8, :128]! + vst1.32 {q11}, [r8, :128]! + vst1.32 {q15}, [r8, :128]! + vst1.32 {q3}, [r8, :128] + + vpop {q4-q7} + pop {r4-r8, pc} -- cgit v1.1 From 7f74c87546f97b8d5864e1f20da54d226de9030b Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 28 Mar 2016 22:04:23 +0300 Subject: "transpose_buf" is not used --- src/ffts_nd.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 2bde9c4..c964d7f 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -2,6 +2,7 @@ This file is part of FFTS -- The Fastest Fourier Transform in the South +Copyright (c) 2016, Jukka Ojanen Copyright (c) 2012, Anthony M. Blake Copyright (c) 2012, The University of Waikato @@ -43,7 +44,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define TSIZE 8 -static void ffts_free_nd(ffts_plan_t *p) +static void +ffts_free_nd(ffts_plan_t *p) { if (p->plans) { int i; @@ -82,14 +84,11 @@ static void ffts_free_nd(ffts_plan_t *p) ffts_aligned_free(p->buf); } - if (p->transpose_buf) { - ffts_aligned_free(p->transpose_buf); - } - free(p); } -static void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t *buf) +static void +ffts_transpose(uint64_t *in, uint64_t *out, int w, int h) { #ifdef HAVE_NEON #if 0 @@ -196,7 +195,8 @@ static void ffts_transpose(uint64_t *in, uint64_t *out, int w, int h, uint64_t * #endif } -static void ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) +static void +ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) { uint64_t *din = (uint64_t*) in; uint64_t *buf = p->buf; @@ -211,7 +211,7 @@ static void ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) plan->transform(plan, din + (j * p->Ms[0]), buf + (j * p->Ms[0])); } - ffts_transpose(buf, dout, p->Ms[0], p->Ns[0], p->transpose_buf); + ffts_transpose(buf, dout, p->Ms[0], p->Ns[0]); for (i = 1; i < p->rank; i++) { plan = p->plans[i]; @@ -220,7 +220,7 @@ static void ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) plan->transform(plan, dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); } - ffts_transpose(buf, dout, p->Ms[i], p->Ns[i], p->transpose_buf); + ffts_transpose(buf, dout, p->Ms[i], p->Ns[i]); } } @@ -261,11 +261,6 @@ ffts_init_nd(int rank, size_t *Ns, int sign) goto cleanup; } - p->transpose_buf = ffts_aligned_malloc(2 * 8 * 8 * sizeof(float)); - if (!p->transpose_buf) { - goto cleanup; - } - p->plans = calloc(rank, sizeof(*p->plans)); if (!p->plans) { goto cleanup; -- cgit v1.1 From ab556f87890e483d1a0814096cad491de270b6ad Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Mon, 28 Mar 2016 23:45:33 +0300 Subject: Rename 
neon_transpose to neon_transpose4, 4x4 tiled matrix transpose. Rename neon_transpose_to_buf to neon_transpose8, 8x8 tiled matrix transpose. --- src/ffts_nd.c | 4 ++-- src/neon.h | 4 ++-- src/neon.s | 16 ++++++++-------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index c964d7f..ebce101 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -92,13 +92,13 @@ ffts_transpose(uint64_t *in, uint64_t *out, int w, int h) { #ifdef HAVE_NEON #if 0 - neon_transpose(in, out, w, h); + neon_transpose4(in, out, w, h); #else size_t i, j; for (j = 0; j < h; j += 8) { for (i = 0; i < w; i += 8) { - neon_transpose_to_buf(in + j*w + i, out + i*h + j, w); + neon_transpose8(in + j*w + i, out + i*h + j, w, h); } } #endif diff --git a/src/neon.h b/src/neon.h index 66dcd4b..f719159 100644 --- a/src/neon.h +++ b/src/neon.h @@ -45,8 +45,8 @@ void neon_eo(); void neon_oe(); void neon_end(); -void neon_transpose(uint64_t *in, uint64_t *out, int w, int h); -void neon_transpose_to_buf(uint64_t *in, uint64_t *out, int w); +void neon_transpose4(uint64_t *in, uint64_t *out, int w, int h); +void neon_transpose8(uint64_t *in, uint64_t *out, int w, int h); void neon_static_e_f(ffts_plan_t*, const void*, void*); void neon_static_o_f(ffts_plan_t*, const void*, void*); diff --git a/src/neon.s b/src/neon.s index 1e7fb92..9b6ccab 100644 --- a/src/neon.s +++ b/src/neon.s @@ -638,11 +638,11 @@ neon_end: .align 4 #ifdef __APPLE__ - .globl _neon_transpose -_neon_transpose: + .globl _neon_transpose4 +_neon_transpose4: #else - .globl neon_transpose -neon_transpose: + .globl neon_transpose4 +neon_transpose4: #endif push {r4-r6, lr} mov r5, r3 @@ -676,11 +676,11 @@ neon_transpose: .align 4 #ifdef __APPLE__ - .globl _neon_transpose_to_buf -_neon_transpose_to_buf: + .globl _neon_transpose8 +_neon_transpose8: #else - .globl neon_transpose_to_buf -neon_transpose_to_buf: + .globl neon_transpose8 +neon_transpose8: #endif push {r4-r8, lr} vpush {q4-q7} -- cgit v1.1 From e464bcb622d5ab1426b14a2314d852fc6e1539e1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 29 Mar 2016 17:01:01 +0300 Subject: Fix neon_transpose8 for non-square matrices, move loops to assembly side, about 5% faster --- src/ffts_nd.c | 8 +- src/neon.s | 248 +++++++++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 194 insertions(+), 62 deletions(-) diff --git a/src/ffts_nd.c b/src/ffts_nd.c index ebce101..5745cd5 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -94,13 +94,7 @@ ffts_transpose(uint64_t *in, uint64_t *out, int w, int h) #if 0 neon_transpose4(in, out, w, h); #else - size_t i, j; - - for (j = 0; j < h; j += 8) { - for (i = 0; i < w; i += 8) { - neon_transpose8(in + j*w + i, out + i*h + j, w, h); - } - } + neon_transpose8(in, out, w, h); #endif #else #ifdef HAVE_SSE diff --git a/src/neon.s b/src/neon.s index 9b6ccab..7486b63 100644 --- a/src/neon.s +++ b/src/neon.s @@ -682,111 +682,249 @@ _neon_transpose8: .globl neon_transpose8 neon_transpose8: #endif - push {r4-r8, lr} + push {r4-r12, lr} vpush {q4-q7} - @ initialize and preload (TODO: optimize) + @ initialize + lsl r2, r2, #3 + mul lr, r2, r3 + lsl r3, r3, #5 + add r4, r0, r2 + lsl ip, r2, #1 + add r5, r1, r3, lsr #2 + add r6, r1, r3, lsr #1 + add r7, r5, r3, lsr #1 + sub lr, r3, lr + sub ip, ip, #64 + sub r8, r3, #48 + add lr, lr, #16 +1: + @ process all but the last one + subs r11, r2, #64 + + @ prefetch next rows 0-5 pld [r0] - add r4, r0, r2, lsl #3 - lsl ip, r2, #4 pld [r4] - pld [r0, ip] - add r6, r1, r2, lsl #3 - pld [r4, ip] - add r7, r6, 
r2, lsl #3 - pld [r0, ip, lsl #1] - pld [r0, ip, lsl #2] - add r8, r7, r2, lsl #3 - pld [r4, ip, lsl #1] - pld [r4, ip, lsl #2] - sub ip, ip, #32 - lsl r3, ip, #1 - add r3, r3, #16 + pld [r0, r2, lsl #1] + pld [r4, r2, lsl #1] + pld [r0, r2, lsl #2] + pld [r4, r2, lsl #2] + @ if there is only the last one + beq 3f +2: @ matrix 0&2 row 0-1 vld1.32 {q0, q1}, [r0, :128]! vld1.32 {q2, q3}, [r4, :128]! vswp d1, d4 vswp d3, d6 vst1.32 {q0}, [r1, :128]! - vst1.32 {q2}, [r6, :128]! - vst1.32 {q1}, [r7, :128]! - vst1.32 {q3}, [r8, :128]! + vst1.32 {q2}, [r5, :128]! + vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! @ matrix 1&3 row 0-1 - vld1.32 {q4, q5}, [r0, :128], ip - vld1.32 {q6, q7}, [r4, :128], ip + vld1.32 {q4, q5}, [r0, :128]! + vld1.32 {q6, q7}, [r4, :128]! vswp d9, d12 vswp d11, d14 + @ prefetch next rows 0-1 + pld [r0] + pld [r4] + add r9, r0, ip + add r10, r4, ip + @ matrix 0&2, row 2-3 - vld1.32 {q0, q1}, [r0, :128]! - vld1.32 {q2, q3}, [r4, :128]! + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! vswp d1, d4 vswp d3, d6 vst1.32 {q0}, [r1, :128]! - vst1.32 {q2}, [r6, :128]! - vst1.32 {q1}, [r7, :128]! - vst1.32 {q3}, [r8, :128]! + vst1.32 {q2}, [r5, :128]! + vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! @ matrix 1&3, row 2-3 - vld1.32 {q8, q9}, [r0, :128], ip - vld1.32 {q10, q11}, [r4, :128], ip + vld1.32 {q8, q9}, [r9, :128]! + vld1.32 {q10, q11}, [r10, :128]! vswp d17, d20 vswp d19, d22 + @ prefetch next rows 2-3 + pld [r9] + pld [r10] + add r9, r9, ip + add r10, r10, ip + @ matrix 0&2, row 4-5 - vld1.32 {q0, q1}, [r0, :128]! - vld1.32 {q2, q3}, [r4, :128]! + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! vswp d1, d4 vswp d3, d6 vst1.32 {q0}, [r1, :128]! - vst1.32 {q2}, [r6, :128]! - vst1.32 {q1}, [r7, :128]! - vst1.32 {q3}, [r8, :128]! + vst1.32 {q2}, [r5, :128]! + vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! @ matrix 1&3, row 4-5 - vld1.32 {q12, q13}, [r0, :128], ip - vld1.32 {q14, q15}, [r4, :128], ip + vld1.32 {q12, q13}, [r9, :128]! + vld1.32 {q14, q15}, [r10, :128]! vswp d25, d28 vswp d27, d30 + @ prefetch next rows 4-5 + pld [r9] + pld [r10] + add r9, r9, ip + add r10, r10, ip + @ matrix 0&2, row 6-7 + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128], r8 + vst1.32 {q2}, [r5, :128], r8 + vst1.32 {q1}, [r6, :128], r8 + vst1.32 {q3}, [r7, :128], r8 + + @ matrix 1&3, row 6-7 + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! + vswp d1, d4 + vswp d3, d6 + + @ prefetch next rows 6-7 + pld [r9] + pld [r10] + + subs r11, r11, #64 + + @ these could be replaced with VSTM, but that requires swaps + vst1.32 {q4}, [r1, :128]! + vst1.32 {q8}, [r1, :128]! + vst1.32 {q12}, [r1, :128]! + vst1.32 {q0}, [r1, :128], r8 + + vst1.32 {q6}, [r5, :128]! + vst1.32 {q10}, [r5, :128]! + vst1.32 {q14}, [r5, :128]! + vst1.32 {q2}, [r5, :128], r8 + + vst1.32 {q5}, [r6, :128]! + vst1.32 {q9}, [r6, :128]! + vst1.32 {q13}, [r6, :128]! + vst1.32 {q1}, [r6, :128], r8 + + vst1.32 {q7}, [r7, :128]! + vst1.32 {q11}, [r7, :128]! + vst1.32 {q15}, [r7, :128]! + vst1.32 {q3}, [r7, :128], r8 + + @ process all but the last on row + bne 2b +3: + @ process the last one + subs r3, r3, #256 + + @ matrix 0&2 row 0-1 vld1.32 {q0, q1}, [r0, :128]! vld1.32 {q2, q3}, [r4, :128]! vswp d1, d4 vswp d3, d6 - vst1.32 {q0}, [r1, :128], r3 - vst1.32 {q2}, [r6, :128], r3 - vst1.32 {q1}, [r7, :128], r3 - vst1.32 {q3}, [r8, :128], r3 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r5, :128]! 
+ vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! + + @ matrix 1&3 row 0-1 + vld1.32 {q4, q5}, [r0, :128]! + vld1.32 {q6, q7}, [r4, :128]! + vswp d9, d12 + vswp d11, d14 + add r9, r0, ip + add r10, r4, ip + + @ matrix 0&2, row 2-3 + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r5, :128]! + vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! + + @ matrix 1&3, row 2-3 + vld1.32 {q8, q9}, [r9, :128]! + vld1.32 {q10, q11}, [r10, :128]! + vswp d17, d20 + vswp d19, d22 + add r9, r9, ip + add r10, r10, ip + + @ matrix 0&2, row 4-5 + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128]! + vst1.32 {q2}, [r5, :128]! + vst1.32 {q1}, [r6, :128]! + vst1.32 {q3}, [r7, :128]! + + @ matrix 1&3, row 4-5 + vld1.32 {q12, q13}, [r9, :128]! + vld1.32 {q14, q15}, [r10, :128]! + vswp d25, d28 + vswp d27, d30 + add r9, r9, ip + add r10, r10, ip + + @ matrix 0&2, row 6-7 + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! + vswp d1, d4 + vswp d3, d6 + vst1.32 {q0}, [r1, :128], r8 + vst1.32 {q2}, [r5, :128], r8 + vst1.32 {q1}, [r6, :128], r8 + vst1.32 {q3}, [r7, :128], r8 @ matrix 1&3, row 6-7 - vld1.32 {q0, q1}, [r0, :128] - vld1.32 {q2, q3}, [r4, :128] + vld1.32 {q0, q1}, [r9, :128]! + vld1.32 {q2, q3}, [r10, :128]! vswp d1, d4 vswp d3, d6 + @ next row starts right after + mov r0, r10 + add r4, r10, r2 + @ these could be replaced with VSTM, but that requires swaps vst1.32 {q4}, [r1, :128]! vst1.32 {q8}, [r1, :128]! vst1.32 {q12}, [r1, :128]! - vst1.32 {q0}, [r1, :128] + vst1.32 {q0}, [r1, :128], lr + + vst1.32 {q6}, [r5, :128]! + vst1.32 {q10}, [r5, :128]! + vst1.32 {q14}, [r5, :128]! + vst1.32 {q2}, [r5, :128], lr - vst1.32 {q6}, [r6, :128]! - vst1.32 {q10}, [r6, :128]! - vst1.32 {q14}, [r6, :128]! - vst1.32 {q2}, [r6, :128] + vst1.32 {q5}, [r6, :128]! + vst1.32 {q9}, [r6, :128]! + vst1.32 {q13}, [r6, :128]! + vst1.32 {q1}, [r6, :128], lr - vst1.32 {q5}, [r7, :128]! - vst1.32 {q9}, [r7, :128]! - vst1.32 {q13}, [r7, :128]! - vst1.32 {q1}, [r7, :128] + vst1.32 {q7}, [r7, :128]! + vst1.32 {q11}, [r7, :128]! + vst1.32 {q15}, [r7, :128]! + vst1.32 {q3}, [r7, :128], lr - vst1.32 {q7}, [r8, :128]! - vst1.32 {q11}, [r8, :128]! - vst1.32 {q15}, [r8, :128]! - vst1.32 {q3}, [r8, :128] + @ process all columns + bne 1b vpop {q4-q7} - pop {r4-r8, pc} + pop {r4-r12, pc} -- cgit v1.1 From 52c7d506e13dca5c92692e95aba5dfa678b2acb0 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 5 Apr 2016 14:44:51 +0300 Subject: Add notes about data layout to ffts.h --- include/ffts.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/include/ffts.h b/include/ffts.h index d187e36..cc85a88 100644 --- a/include/ffts.h +++ b/include/ffts.h @@ -56,12 +56,22 @@ extern "C" { # endif #endif -#define POSITIVE_SIGN 1 -#define NEGATIVE_SIGN -1 +/* The direction of the transform + (i.e, the sign of the exponent in the transform.) 
+*/ +#define FFTS_FORWARD (-1) +#define FFTS_BACKWARD (+1) struct _ffts_plan_t; typedef struct _ffts_plan_t ffts_plan_t; +/* Complex data is stored in the interleaved format + (i.e., the real and imaginary parts composing each + element of complex data are stored adjacently in memory) + + The multi-dimensional arrays passed are expected to be + stored as a single contiguous block in row-major order +*/ FFTS_API ffts_plan_t* ffts_init_1d(size_t N, int sign); @@ -71,8 +81,10 @@ ffts_init_2d(size_t N1, size_t N2, int sign); FFTS_API ffts_plan_t* ffts_init_nd(int rank, size_t *Ns, int sign); -/* For real transforms, sign == -1 implies a real-to-complex forwards tranform, - and sign == 1 implies a complex-to-real backwards transform. +/* For real transforms, sign == FFTS_FORWARD implies a real-to-complex + forwards transform, and sign == FFTS_BACKWARD implies a complex-to-real + backwards transform. + The output of a real-to-complex transform is N/2+1 complex numbers, where the redundant outputs have been omitted. */
-- cgit v1.1
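The layout notes added above are enough to drive the complex API end to end: interleaved float pairs in, FFTS_FORWARD as the sign. A minimal sketch of a forward 1-D transform along those lines (it assumes the library's usual ffts_execute() and ffts_free() entry points, which this excerpt does not show):

#include "ffts.h"
#include <stdio.h>

int main(void)
{
    /* 8 complex points = 16 floats, interleaved re,im,re,im,...
     * production code should give ffts_execute() 32-byte-aligned
     * buffers so the SIMD paths can load the data directly */
    float in[16], out[16];
    ffts_plan_t *p;
    int i;

    for (i = 0; i < 8; i++) {
        in[2 * i + 0] = (float) i; /* real part */
        in[2 * i + 1] = 0.0f;      /* imaginary part */
    }

    p = ffts_init_1d(8, FFTS_FORWARD);
    if (!p) {
        return 1;
    }

    ffts_execute(p, in, out);

    for (i = 0; i < 8; i++) {
        printf("%d: %f %f\n", i, out[2 * i], out[2 * i + 1]);
    }

    ffts_free(p);
    return 0;
}

Note that FFTS does not normalize: running the result back through an FFTS_BACKWARD plan returns the original data scaled by N.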
From c71724a0dc7536ef160732b5ed6ec1f4ef2c0ff9 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 5 Apr 2016 14:49:07 +0300 Subject: Fix ffts_init_nd() for 3 or higher rank complex FFTs --- src/ffts_nd.c | 55 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 5745cd5..49c6229 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -48,16 +48,14 @@ static void ffts_free_nd(ffts_plan_t *p) { if (p->plans) { - int i; + int i, j; for (i = 0; i < p->rank; i++) { ffts_plan_t *plan = p->plans[i]; if (plan) { - int k; - - for (k = 0; k < i; k++) { - if (p->Ms[i] == p->Ms[k]) { + for (j = 0; j < i; j++) { + if (p->Ns[i] == p->Ns[j]) { plan = NULL; break; } @@ -201,20 +199,20 @@ ffts_execute_nd(ffts_plan_t *p, const void *in, void *out) size_t j; plan = p->plans[0]; - for (j = 0; j < p->Ns[0]; j++) { - plan->transform(plan, din + (j * p->Ms[0]), buf + (j * p->Ms[0])); + for (j = 0; j < p->Ms[0]; j++) { + plan->transform(plan, din + (j * p->Ns[0]), buf + (j * p->Ns[0])); } - ffts_transpose(buf, dout, p->Ms[0], p->Ns[0]); + ffts_transpose(buf, dout, p->Ns[0], p->Ms[0]); for (i = 1; i < p->rank; i++) { plan = p->plans[i]; - for (j = 0; j < p->Ns[i]; j++) { - plan->transform(plan, dout + (j * p->Ms[i]), buf + (j * p->Ms[i])); + for (j = 0; j < p->Ms[i]; j++) { + plan->transform(plan, dout + (j * p->Ns[i]), buf + (j * p->Ns[i])); } - ffts_transpose(buf, dout, p->Ms[i], p->Ns[i]); + ffts_transpose(buf, dout, p->Ns[i], p->Ms[i]); } } @@ -222,8 +220,16 @@ FFTS_API ffts_plan_t* ffts_init_nd(int rank, size_t *Ns, int sign) { ffts_plan_t *p; - size_t vol; - int i; + size_t vol = 1; + int i, j; + + if (!Ns) { + return NULL; + } + + if (rank == 1) { + return ffts_init_1d(Ns[0], sign); + } p = calloc(1, sizeof(*p)); if (!p) { @@ -244,10 +250,11 @@ ffts_init_nd(int rank, size_t *Ns, int sign) goto cleanup; } - vol = p->Ns[0] = Ns[0]; - for (i = 1; i < rank; i++) { - p->Ns[i] = Ns[i]; - vol *= Ns[i]; + /* reverse the order */ + for (i = 0; i < rank; i++) { + size_t N = Ns[rank - i - 1]; + p->Ns[i] = N; + vol *= N; } p->buf = ffts_aligned_malloc(2 * vol * sizeof(float)); @@ -261,19 +268,17 @@ ffts_init_nd(int rank, size_t *Ns, int sign) for (i = 0; i < rank; i++) { - int k; - p->Ms[i] = vol / p->Ns[i]; - for (k = 0; k < i; k++) { - if (p->Ms[k] == p->Ms[i]) { - p->plans[i] = p->plans[k], break; hm + for (j = 0; j < i; j++) { + if (p->Ns[i] == p->Ns[j]) { + p->plans[i] = p->plans[j]; break; } } if (!p->plans[i]) { - p->plans[i] = ffts_init_1d(p->Ms[i], sign); + p->plans[i] = ffts_init_1d(p->Ns[i], sign); if (!p->plans) { goto cleanup; } @@ -292,7 +297,7 @@ ffts_init_2d(size_t N1, size_t N2, int sign) { size_t Ns[2]; - Ns[0] = N1; - Ns[1] = N2; + Ns[0] = N1; /* x */ + Ns[1] = N2; /* y */ return ffts_init_nd(2, Ns, sign); }
-- cgit v1.1
From 8b2d55e5d6bd43eb45ca7da1595fccc401a22158 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Tue, 5 Apr 2016 17:52:13 +0300 Subject: Combine ffts_transpose_scalar and ffts_transpose, and use ffts_transpose_scalar as native C fallback --- CMakeLists.txt | 2 + src/ffts_nd.c | 112 +---------------------------- src/ffts_nd.h | 67 ++++++++++-------- src/ffts_real.c | 4 +- src/ffts_real.h | 59 ++++++++-------- src/ffts_real_nd.c | 174 ++++++++++++++------------------------- src/ffts_real_nd.h | 67 +++++++++--------- src/ffts_transpose.c | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/ffts_transpose.h | 46 ++++++++++++ 9 files changed, 399 insertions(+), 326 deletions(-) create mode 100644 src/ffts_transpose.c create mode 100644 src/ffts_transpose.h
diff --git a/CMakeLists.txt b/CMakeLists.txt index 58f402b..8c21185 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -351,6 +351,8 @@ set(FFTS_SOURCES src/ffts_real.c src/ffts_real_nd.c src/ffts_real_nd.h + src/ffts_transpose.c + src/ffts_transpose.h src/ffts_trig.c src/ffts_trig.h src/ffts_static.c
diff --git a/src/ffts_nd.c b/src/ffts_nd.c index 49c6229..64220f1 100644 --- a/src/ffts_nd.c +++ b/src/ffts_nd.c @@ -34,15 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "ffts_nd.h" #include "ffts_internal.h" - -#ifdef HAVE_NEON -#include "neon.h" -#include <arm_neon.h> -#elif HAVE_SSE2 -#include <emmintrin.h> -#endif - -#define TSIZE 8 +#include "ffts_transpose.h" static void ffts_free_nd(ffts_plan_t *p) @@ -86,108 +78,6 @@ ffts_free_nd(ffts_plan_t *p) } static void -ffts_transpose(uint64_t *in, uint64_t *out, int w, int h) -{ -#ifdef HAVE_NEON -#if 0 - neon_transpose4(in, out, w, h); -#else - neon_transpose8(in, out, w, h); -#endif -#else -#ifdef HAVE_SSE - uint64_t FFTS_ALIGN(64) tmp[TSIZE*TSIZE]; - int tx, ty; - /* int x; */ - int y; - int tw = w / TSIZE; - int th = h / TSIZE; - - for (ty = 0; ty < th; ty++) { - for (tx = 0; tx < tw; tx++) { - uint64_t *ip0 = in + w*TSIZE*ty + tx * TSIZE; - uint64_t *op0 = tmp; /* out + h*TSIZE*tx + ty*TSIZE; */ - - /* copy/transpose to tmp */ - for (y = 0; y < TSIZE; y += 2) { - /* for (x=0;x - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M.
BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef FFTS_ND_H #define FFTS_ND_H +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + #include "ffts.h" #include -ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign); -ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign); +ffts_plan_t* +ffts_init_nd(int rank, size_t *Ns, int sign); + +ffts_plan_t* +ffts_init_2d(size_t N1, size_t N2, int sign); #endif /* FFTS_ND_H */ diff --git a/src/ffts_real.c b/src/ffts_real.c index 7f41069..0f87a12 100644 --- a/src/ffts_real.c +++ b/src/ffts_real.c @@ -641,9 +641,9 @@ ffts_init_1d_real(size_t N, int sign) } #ifdef HAVE_SSE3 - ffts_generate_table_1d_real_32f(p, sign, 1); + ffts_generate_table_1d_real_32f(p, sign, 1); #else - ffts_generate_table_1d_real_32f(p, sign, 0); + ffts_generate_table_1d_real_32f(p, sign, 0); #endif return p; diff --git a/src/ffts_real.h b/src/ffts_real.h index 81ca80f..61d03d4 100644 --- a/src/ffts_real.h +++ b/src/ffts_real.h @@ -1,33 +1,33 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @@ -41,6 +41,7 @@ #include "ffts.h" #include -ffts_plan_t *ffts_init_1d_real(size_t N, int sign); +ffts_plan_t* +ffts_init_1d_real(size_t N, int sign); #endif /* FFTS_REAL_H */ diff --git a/src/ffts_real_nd.c b/src/ffts_real_nd.c index 545e8f0..89ef7f7 100644 --- a/src/ffts_real_nd.c +++ b/src/ffts_real_nd.c @@ -1,80 +1,67 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "ffts_real_nd.h" #include "ffts_real.h" #include "ffts_internal.h" +#include "ffts_transpose.h" -#ifdef __ARM_NEON__ -#include "neon.h" -#endif - -#ifdef HAVE_NEON -#include -#elif HAVE_SSE -#include -#endif - -#include - -static void ffts_free_nd_real(ffts_plan_t *p) +static void +ffts_free_nd_real(ffts_plan_t *p) { if (p->plans) { - int i; + int i, j; for (i = 0; i < p->rank; i++) { ffts_plan_t *plan = p->plans[i]; - if (plan) { - int j; - - for (j = i + 1; j < p->rank; j++) { - if (plan == p->plans[j]) { - p->plans[j] = NULL; - } - } - - ffts_free(plan); - } + if (plan) { + for (j = 0; j < i; j++) { + if (p->Ns[i] == p->Ns[j]) { + plan = NULL; + break; + } + } + + if (plan) { + ffts_free(plan); + } + } } free(p->plans); } - if (p->transpose_buf) { - ffts_aligned_free(p->transpose_buf); - } - if (p->buf) { ffts_aligned_free(p->buf); } @@ -90,59 +77,8 @@ static void ffts_free_nd_real(ffts_plan_t *p) free(p); } -static void ffts_scalar_transpose(uint64_t *src, uint64_t *dst, int w, int h, uint64_t *buf) -{ - const int bw = 1; - const int bh = 8; - int i = 0, j = 0; - - for (; i <= h - bh; i += bh) { - for (j = 0; j <= w - bw; j += bw) { - uint64_t const *ib = &src[w*i + j]; - uint64_t *ob = &dst[h*j + i]; - - uint64_t s_0_0 = ib[0*w + 0]; - uint64_t s_1_0 = ib[1*w + 0]; - uint64_t s_2_0 = ib[2*w + 0]; - uint64_t s_3_0 = ib[3*w + 0]; - uint64_t s_4_0 = ib[4*w + 0]; - uint64_t s_5_0 = ib[5*w + 0]; - uint64_t s_6_0 = ib[6*w + 0]; - uint64_t s_7_0 = ib[7*w + 0]; - - ob[0*h + 0] = s_0_0; - ob[0*h + 1] = s_1_0; - ob[0*h + 2] = s_2_0; - ob[0*h + 3] = s_3_0; - ob[0*h + 4] = s_4_0; - ob[0*h + 5] = s_5_0; - ob[0*h + 6] = s_6_0; - ob[0*h + 7] = s_7_0; - } - } - - if (i < h) { - int i1; - - for (i1 = 0; i1 < w; i1++) { - for (j = i; j < h; j++) { - dst[i1*h + j] = src[j*w + i1]; - } - } - } - - if (j < w) { - int j1; - - for (i = j; i < w; i++) { - for (j1 = 0; j1 < h; j1++) { - dst[i*h + j1] = src[j1*w + i]; - } - } - } -} - -static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) +static void +ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) { const size_t Ms0 = p->Ms[0]; const size_t Ns0 = p->Ns[0]; @@ -150,7 +86,6 @@ static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) uint32_t *din = (uint32_t*) in; uint64_t *buf = p->buf; uint64_t *dout = (uint64_t*) out; - uint64_t *transpose_buf = (uint64_t*) p->transpose_buf; ffts_plan_t *plan; int i; @@ -161,7 +96,7 @@ static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) plan->transform(plan, din + (j * Ms0), buf + (j * (Ms0 / 2 + 1))); } - ffts_scalar_transpose(buf, dout, Ms0 / 2 + 1, Ns0, transpose_buf); + ffts_transpose(buf, dout, Ms0 / 2 + 1, Ns0); for (i = 1; i < p->rank; i++) { const size_t Ms = p->Ms[i]; @@ -173,11 +108,12 @@ static void ffts_execute_nd_real(ffts_plan_t *p, const void *in, void *out) plan->transform(plan, dout + (j * Ms), buf + (j * Ms)); } - ffts_scalar_transpose(buf, dout, Ms, Ns, transpose_buf); + ffts_transpose(buf, dout, Ms, Ns); } } -static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) +static void +ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) { const size_t Ms0 = p->Ms[0]; const size_t Ms1 = p->Ms[1]; @@ -187,7 +123,6 @@ static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) uint64_t *din = (uint64_t*) in; uint64_t *buf = p->buf; uint64_t *buf2; - uint64_t *transpose_buf = (uint64_t*) p->transpose_buf; float *doutr = (float*) out; ffts_plan_t *plan; @@ -203,14 
+138,14 @@ static void ffts_execute_nd_real_inv(ffts_plan_t *p, const void *in, void *out) buf2 = buf + vol; - ffts_scalar_transpose(din, buf, Ms0, Ns0, transpose_buf); + ffts_transpose(din, buf, Ms0, Ns0); plan = p->plans[0]; for (j = 0; j < Ms0; j++) { plan->transform(plan, buf + (j * Ns0), buf2 + (j * Ns0)); } - ffts_scalar_transpose(buf2, buf, Ns0, Ms0, transpose_buf); + ffts_transpose(buf2, buf, Ns0, Ms0); plan = p->plans[1]; for (j = 0; j < Ms1; j++) { @@ -267,11 +202,6 @@ ffts_init_nd_real(int rank, size_t *Ns, int sign) goto cleanup; } - p->transpose_buf = ffts_aligned_malloc(2 * 8 * 8 * sizeof(float)); - if (!p->transpose_buf) { - goto cleanup; - } - p->plans = (ffts_plan_t**) calloc(rank, sizeof(*p->plans)); if (!p->plans) { goto cleanup; diff --git a/src/ffts_real_nd.h b/src/ffts_real_nd.h index 22a708d..fac607b 100644 --- a/src/ffts_real_nd.h +++ b/src/ffts_real_nd.h @@ -1,33 +1,33 @@ - /* - - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @@ -41,7 +41,10 @@ #include "ffts.h" #include -ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign); -ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign); +ffts_plan_t* +ffts_init_nd_real(int rank, size_t *Ns, int sign); + +ffts_plan_t* +ffts_init_2d_real(size_t N1, size_t N2, int sign); #endif /* FFTS_REAL_ND_H */ \ No newline at end of file diff --git a/src/ffts_transpose.c b/src/ffts_transpose.c new file mode 100644 index 0000000..272cb48 --- /dev/null +++ b/src/ffts_transpose.c @@ -0,0 +1,194 @@ +/* + +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2016, Jukka Ojanen +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#include "ffts_transpose.h" +#include "ffts_internal.h" + +#ifdef HAVE_NEON +#include "neon.h" +#include +#elif HAVE_SSE2 +#include +#endif + +#define TSIZE 8 + +void +ffts_transpose(uint64_t *in, uint64_t *out, int w, int h) +{ +#ifdef HAVE_NEON +#if 0 + neon_transpose4(in, out, w, h); +#else + neon_transpose8(in, out, w, h); +#endif +#elif HAVE_SSE2 + uint64_t FFTS_ALIGN(64) tmp[TSIZE*TSIZE]; + int tx, ty; + /* int x; */ + int y; + int tw = w / TSIZE; + int th = h / TSIZE; + + for (ty = 0; ty < th; ty++) { + for (tx = 0; tx < tw; tx++) { + uint64_t *ip0 = in + w*TSIZE*ty + tx * TSIZE; + uint64_t *op0 = tmp; /* out + h*TSIZE*tx + ty*TSIZE; */ + + /* copy/transpose to tmp */ + for (y = 0; y < TSIZE; y += 2) { + /* for (x=0;x +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef FFTS_TRANSPOSE_H +#define FFTS_TRANSPOSE_H + +#if defined (_MSC_VER) && (_MSC_VER >= 1020) +#pragma once +#endif + +#include "ffts_internal.h" + +void +ffts_transpose(uint64_t *in, uint64_t *out, int w, int h); + +#endif /* FFTS_TRANSPOSE_H */
-- cgit v1.1
From 78d328e98edf8b9bb5c272f4c2f900466c8b082a Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 6 Apr 2016 17:13:15 +0300 Subject: Silence the compiler warnings --- src/ffts_static.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/src/ffts_static.c b/src/ffts_static.c index e1b2f6b..09de6d7 100644 --- a/src/ffts_static.c +++ b/src/ffts_static.c @@ -794,8 +794,11 @@ ffts_small_forward8_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - double *LUT8 = (double*) p->ws + p->ws_is[0]; +// V4SF r0_1, r2_3, r4_5, r6_7; +// double *LUT8 = (double*) p->ws + p->ws_is[0]; + (void) p; + (void) din; + (void) dout; #if MACROS_READY L_4_2(0, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); @@ -825,8 +828,12 @@ ffts_small_backward8_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - V4SF r0_1, r2_3, r4_5, r6_7; - double *LUT8 = (double*) p->ws + p->ws_is[0]; +// V4SF r0_1, r2_3, r4_5, r6_7; +// double *LUT8 = (double*) p->ws + p->ws_is[0]; + (void) p; + (void) din; + (void) dout; + #if MACROS_READY L_4_2(1, din, din+8, din+4, din+12, &r0_1, &r2_3, &r4_5, &r6_7); @@ -860,8 +867,11 @@ ffts_small_forward16_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - double *LUT8 = (double*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; +// double *LUT8 = (double*) p->ws; +// V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + (void) p; + (void) din; + (void) dout; #ifdef MACROS_READY L_4_4(0, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11); @@ -899,8 +909,11 @@ ffts_small_backward16_64f(ffts_plan_t *p, const void *in, void *out) { const double *din = (const double*) in; double *dout = (double*) out; - double *LUT8 = (double*) p->ws; - V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; +// double *LUT8 = (double*) p->ws; +// V4SF r0_1, r2_3, r4_5, r6_7, r8_9, r10_11, r12_13, r14_15; + (void) p; + (void) din; + (void) dout; #ifdef MACROS_READY L_4_4(1, din+0, din+16, din+8, din+24, &r0_1, &r2_3, &r8_9, &r10_11);
-- cgit v1.1
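The (void) casts added above are the portable idiom behind this commit: they mark a value as deliberately unused, silencing -Wunused-parameter and -Wunused-variable diagnostics while the real bodies stay disabled behind MACROS_READY. A standalone illustration of the same pattern (the function and names below are hypothetical, not FFTS code):

#include <stdio.h>

/* Hypothetical callback whose signature is fixed by a function-pointer
 * table, but whose body is stubbed out for now. */
static void stub_transform(void *plan, const void *in, void *out)
{
    (void) plan; /* deliberately unused: keeps the ABI, kills the warning */
    (void) in;
    (void) out;
}

int main(void)
{
    stub_transform(NULL, NULL, NULL);
    puts("stub called without warnings");
    return 0;
}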
From c85bf4b4a7a1199d9c48c24ec5a49ea0b16e1af1 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Wed, 6 Apr 2016 17:34:23 +0300 Subject: Try to remove some of the hard-coded offsets to _ffts_plan_t --- src/codegen_sse.h | 12 +++++++---- src/ffts_internal.h | 60 +++++++++++++++++++++++++++-------------------------- 2 files changed, 39 insertions(+), 33 deletions(-)
diff --git a/src/codegen_sse.h b/src/codegen_sse.h index d15b316..e9819f1 100644 --- a/src/codegen_sse.h +++ b/src/codegen_sse.h @@ -437,10 +437,12 @@ generate_leaf_init(insns_t **fp, uint32_t loop_count) x86_clear_reg(ins, X86_EAX); /* set "pointer" to offsets */ - x64_mov_reg_membase(ins, X64_R9, X64_RCX, 0x0, 8); + x64_mov_reg_membase(ins, X64_R9, X64_RCX, + offsetof(struct _ffts_plan_t, offsets), 8); /* set "pointer" to constants */ - x64_mov_reg_membase(ins, X64_RSI, X64_RCX, 0xE0, 8); + x64_mov_reg_membase(ins, X64_RSI, X64_RCX, + offsetof(struct _ffts_plan_t, constants), 8); /* use XMM3 for sign change */ x64_sse_movaps_reg_membase(ins, X64_XMM3, X64_RSI, 0); @@ -454,10 +456,12 @@ generate_leaf_init(insns_t **fp, uint32_t loop_count) x86_clear_reg(ins, X86_EAX); /* set "pointer" to offsets */ - x64_mov_reg_membase(ins, X64_R8, X64_RDI, 0x0, 8); + x64_mov_reg_membase(ins, X64_R8, X64_RDI, + offsetof(struct _ffts_plan_t, offsets), 8); /* set "pointer" to constants */ - x64_mov_reg_membase(ins, X64_R9, X64_RDI, 0xE0, 8); + x64_mov_reg_membase(ins, X64_R9, X64_RDI, + offsetof(struct _ffts_plan_t, constants), 8); /* align loop/jump destination */ ffts_align_mem16(&ins, 9);
diff --git a/src/ffts_internal.h b/src/ffts_internal.h index 30e814b..7ae8789 100644 --- a/src/ffts_internal.h +++ b/src/ffts_internal.h @@ -1,33 +1,33 @@ /* - This file is part of FFTS -- The Fastest Fourier Transform in the South - - Copyright (c) 2012, Anthony M. Blake - Copyright (c) 2012, The University of Waikato - - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the organization nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +This file is part of FFTS -- The Fastest Fourier Transform in the South + +Copyright (c) 2012, Anthony M. Blake +Copyright (c) 2012, The University of Waikato + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of the organization nor the +names of its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ANTHONY M. BLAKE BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ @@ -116,7 +116,9 @@ struct _ffts_plan_t { size_t N; void *lastlut; - size_t *temporary_fix_as_dynamic_code_assumes_fixed_offset; +#ifdef __arm__ + size_t *temporary_fix_as_dynamic_code_assumes_fixed_offset; +#endif /** * Pointer to the dynamically generated function
-- cgit v1.1
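Replacing the literal displacements 0x0 and 0xE0 with offsetof() keeps the emitted mov reg, [base + disp] instructions in sync with _ffts_plan_t even when fields move. A minimal sketch of the idea using a hypothetical stand-in struct (the pad is sized so that, on a typical LP64 target, the second pointer lands at 0xE0, the magic number this commit removes):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical plan-like struct; the real _ffts_plan_t has many more
 * fields between these two pointers. */
struct plan {
    ptrdiff_t *offsets;  /* at offset 0x0 */
    char pad[0xD8];      /* stand-in for the intervening fields */
    void *constants;     /* 8 + 0xD8 = 0xE0 on a typical LP64 target */
};

int main(void)
{
    /* these values feed the generated code; recomputing them with
     * offsetof() survives any reshuffle of the struct */
    printf("offsets   at 0x%x\n", (unsigned) offsetof(struct plan, offsets));
    printf("constants at 0x%x\n", (unsigned) offsetof(struct plan, constants));
    return 0;
}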
From 944d14c9151f6b20145de0cdae38e366e73c9432 Mon Sep 17 00:00:00 2001 From: Jukka Ojanen Date: Thu, 7 Apr 2016 12:01:57 +0300 Subject: If the system is not ARM or x86 based, we will have an invalid set of compiler flags --- CMakeLists.txt | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt index 8c21185..2028c03 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,11 +82,13 @@ if(HAVE_UNISTD_H) add_definitions(-DHAVE_UNISTD_H) endif(HAVE_UNISTD_H) +# backup flags +set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + # Determinate if we are cross-compiling if(NOT CMAKE_CROSSCOMPILING) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") # Determinate ARM architecture - set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) # Try to execute quietly without messages set(CMAKE_REQUIRED_QUIET 1) @@ -230,8 +232,6 @@ if(NOT CMAKE_CROSSCOMPILING) endif(NOT HARDFP_SUPPORTED) endif(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED) else() - set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) - # enable SSE code generation if(CMAKE_COMPILER_IS_GNUCC) set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse") @@ -294,6 +294,9 @@ else() # TODO: Add detections for compiler support and headers endif(NOT CMAKE_CROSSCOMPILING) +# restore flags +set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) + # compiler settings if(MSVC) # enable all warnings but also disable some..
-- cgit v1.1
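For reference, the ffts_transpose() consolidation earlier in this series funnels every rank through one transpose over 8-byte elements (each an interleaved complex float pair), with a blocked scalar loop as the portable fallback. A rough plain-C sketch of that fallback idea; simplified, and not the actual ffts_transpose_scalar implementation:

#include <assert.h>
#include <stdint.h>

#define BLOCK 8

/* Transpose a matrix of h rows by w columns of 8-byte elements,
 * walking BLOCK x BLOCK tiles so both source and destination stay
 * cache friendly; edge tiles are clamped to the matrix bounds. */
static void transpose_blocked(const uint64_t *in, uint64_t *out, int w, int h)
{
    int i, j, bi, bj;

    for (i = 0; i < h; i += BLOCK) {
        for (j = 0; j < w; j += BLOCK) {
            int imax = (i + BLOCK < h) ? i + BLOCK : h;
            int jmax = (j + BLOCK < w) ? j + BLOCK : w;

            for (bi = i; bi < imax; bi++) {
                for (bj = j; bj < jmax; bj++) {
                    out[bj * h + bi] = in[bi * w + bj];
                }
            }
        }
    }
}

int main(void)
{
    uint64_t in[6] = { 1, 2, 3, 4, 5, 6 }; /* 2 rows x 3 columns */
    uint64_t out[6];

    transpose_blocked(in, out, 3, 2);
    assert(out[0] == 1 && out[1] == 4); /* first row of the 3 x 2 result */
    return 0;
}

The 8-by-8 blocking mirrors the TSIZE tiles the SIMD paths in ffts_transpose.c operate on.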